diff --git a/.drone.yml b/.drone.yml
index b4de8c7e1..9be28af18 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -15,13 +15,13 @@ clone:
steps:
- name: license-check
- image: quay.io/sighup/golang:1.21.5
+ image: quay.io/sighup/golang:1.23.3
pull: always
commands:
- make license-check
- name: schema-check
- image: quay.io/sighup/golang:1.21.5
+ image: quay.io/sighup/golang:1.23.3
pull: always
commands:
- |-
@@ -49,39 +49,52 @@ steps:
- license-check
- schema-check
- - name: lint-go
- image: quay.io/sighup/golang:1.21.5
- pull: always
- commands:
- - make lint-go
- depends_on:
- - license-check
- - schema-check
+ # - name: lint-go
+ # image: quay.io/sighup/golang:1.23.3
+ # pull: always
+ # commands:
+ # - make lint-go
+ # depends_on:
+ # - license-check
+ # - schema-check
- name: test-schema
- # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0
- image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1
+ image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3
pull: always
depends_on:
- license-check
- schema-check
+ environment:
+ JV_VERSION: 6.0.1
commands:
+ # we need to download `jv` for running the JSON Schemas tests.
+ - curl -L https://github.com/santhosh-tekuri/jsonschema/releases/download/v$${JV_VERSION}/jv-v$${JV_VERSION}-linux-amd64.tar.gz | tar zx --directory /usr/local/bin/
- bats -t tests/schema.sh
- name: render
- # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0
- image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1
+ image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3
pull: always
- commands:
- - echo $${NETRC_FILE} > /root/.netrc
- - furyctl vendor -H
- - kustomize build . > distribution.yml
- environment:
- NETRC_FILE:
- from_secret: NETRC_FILE
depends_on:
- license-check
- schema-check
+ environment:
+ NETRC_FILE:
+ from_secret: NETRC_FILE
+ FURYCTL_VERSION: v0.30.0-rc.1
+ FURYCTL_CONFIG: tests/e2e/kfddistribution/furyctl-init-cluster.yaml
+ FURYCTL_DISTRO_LOCATION: ./
+ FURYCTL_OUTDIR: ./
+ FURYCTL_DISABLE_ANALYTICS: "true"
+ KUBECONFIG: ./dummy
+ commands:
+ - echo $${NETRC_FILE} > /root/.netrc
+ - echo "Installing furyctl version $${FURYCTL_VERSION}..."
+ - curl -L "https://github.com/sighupio/furyctl/releases/download/$${FURYCTL_VERSION}/furyctl-$(uname -s)-amd64.tar.gz" | tar xz -C /usr/local/bin/
+ - furyctl download dependencies && furyctl dump template
+ # Move the folder with the manifests generated from the templates into the right path
+ - mv distribution $${FURYCTL_OUTDIR}.furyctl/$$(yq .metadata.name $FURYCTL_CONFIG)
+ # Build the whole distribution
+ - kustomize build $${FURYCTL_OUTDIR}.furyctl/$$(yq .metadata.name $FURYCTL_CONFIG)/distribution/manifests > distribution.yml
- name: check-deprecated-apis
image: us-docker.pkg.dev/fairwinds-ops/oss/pluto:v5
@@ -90,10 +103,10 @@ steps:
- render
commands:
# we use --ignore-deprecations because we don't want the CI to fail when the API has not been removed yet.
- - /pluto detect distribution.yml --ignore-deprecations --target-versions=k8s=v1.29.0
+ - /pluto detect distribution.yml --ignore-deprecations --target-versions=k8s=v1.30.0
---
-name: e2e-kubernetes-1.29
+name: e2e-kubernetes-1.30
kind: pipeline
type: docker
@@ -116,13 +129,13 @@ trigger:
steps:
- name: create Kind cluster
- image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0
+ image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0
pull: always
volumes:
- name: dockersock
path: /var/run/docker.sock
environment:
- CLUSTER_VERSION: v1.29.0
+ CLUSTER_VERSION: v1.30.6
CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}
# /drone/src is the default workdir for the pipeline
# using this folder we don't need to mount another
@@ -148,8 +161,7 @@ steps:
- kind get kubeconfig --name $${CLUSTER_NAME} > $${KUBECONFIG}
- name: e2e-kfddistribution
- # KUBECTL_KUSTOMIZE_HELM_YQ_ISTIOCTL_FURYCTL_BATS
- image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3
+ image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3
pull: always
# we need to use host network to access Kind API port that is listening on the worker's loopback
# beacuse we mount the host's Docker socket to run Kind.
@@ -157,7 +169,7 @@ steps:
environment:
CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}
KUBECONFIG: /drone/src/kubeconfig
- FURYCTL_VERSION: v0.29.7-rc.0
+ FURYCTL_VERSION: v0.30.0-rc.1
depends_on: [create Kind cluster]
commands:
- export KUBECONFIG=/drone/src/kubeconfig
@@ -175,7 +187,7 @@ steps:
- tests/e2e-kfddistribution.sh
- name: delete-kind-cluster
- image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0
+ image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0
volumes:
- name: dockersock
path: /var/run/docker.sock
@@ -196,13 +208,13 @@ volumes:
host:
path: /var/run/docker.sock
---
-name: e2e-kubernetes-1.29.0-1.29.1-1.29.2-1.29.3-1.29.4
+name: e2e-kubernetes-1.29.4-to-1.30.0
kind: pipeline
type: docker
depends_on:
- qa
- - e2e-kubernetes-1.29
+ - e2e-kubernetes-1.30
clone:
depth: 1
@@ -220,13 +232,13 @@ trigger:
steps:
- name: create Kind cluster
- image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0
+ image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0
pull: always
volumes:
- name: dockersock
path: /var/run/docker.sock
environment:
- CLUSTER_VERSION: v1.29.0
+ CLUSTER_VERSION: v1.30.6
CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades
# /drone/src is the default workdir for the pipeline
# using this folder we don't need to mount another
@@ -252,8 +264,7 @@ steps:
- kind get kubeconfig --name $${CLUSTER_NAME} > $${KUBECONFIG}
- name: e2e-kfddistribution
- # KUBECTL_KUSTOMIZE_HELM_YQ_ISTIOCTL_FURYCTL_BATS
- image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3
+ image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3
pull: always
# we need to use host network to access Kind API port that is listening on the worker's loopback
# beacuse we mount the host's Docker socket to run Kind.
@@ -261,7 +272,7 @@ steps:
environment:
CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades
KUBECONFIG: /drone/src/kubeconfig-upgrades
- FURYCTL_VERSION: v0.29.7-rc.0
+ FURYCTL_VERSION: v0.30.0-rc.1
depends_on: [create Kind cluster]
commands:
- export KUBECONFIG=/drone/src/kubeconfig-upgrades
@@ -279,7 +290,7 @@ steps:
- tests/e2e-kfddistribution-upgrades.sh
- name: delete-kind-cluster
- image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0
+ image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0
volumes:
- name: dockersock
path: /var/run/docker.sock
@@ -305,8 +316,8 @@ kind: pipeline
type: docker
depends_on:
- - e2e-kubernetes-1.29
- - e2e-kubernetes-1.29.0-1.29.1-1.29.2-1.29.3-1.29.4
+ - e2e-kubernetes-1.30
+ - e2e-kubernetes-1.29.4-to-1.30.0
platform:
os: linux
@@ -320,22 +331,6 @@ trigger:
- refs/tags/**-docs*
steps:
- - name: prepare-release-manifests
- image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1
- pull: always
- depends_on: [clone]
- environment:
- RELEASE_MANIFESTS_PATH: fury-distribution-${DRONE_TAG}.yml
- commands:
- - furyctl vendor -H
- - kustomize build . > $${RELEASE_MANIFESTS_PATH}
- when:
- ref:
- include:
- - refs/tags/**
- exclude:
- - refs/tags/**-docs*
-
- name: prepare-release-notes
image: quay.io/sighup/fury-release-notes-plugin:3.7_2.8.4
depends_on: [clone]
@@ -352,16 +347,12 @@ steps:
image: plugins/github-release
pull: always
depends_on:
- - prepare-release-manifests
- prepare-release-notes
settings:
api_key:
from_secret: github_token
file_exists: skip
files:
- - fury-distribution-${DRONE_TAG}.yml
- - Furyfile.yaml
- - kustomization.yaml
- kfd.yaml
prerelease: true
overwrite: true
@@ -381,16 +372,12 @@ steps:
image: plugins/github-release
pull: always
depends_on:
- - prepare-release-manifests
- prepare-release-notes
settings:
api_key:
from_secret: github_token
file_exists: skip
files:
- - fury-distribution-${DRONE_TAG}.yml
- - Furyfile.yaml
- - kustomization.yaml
- kfd.yaml
prerelease: false
overwrite: true
diff --git a/.tool-versions b/.tool-versions
index fdaa990a2..65e31e4a8 100644
--- a/.tool-versions
+++ b/.tool-versions
@@ -1,7 +1,7 @@
bats 1.9.0
drone 1.7.0
-golang 1.21.5
-golangci-lint 1.55.2
+golang 1.23.3
+golangci-lint 1.62.0
yq 4.33.3
jq 1.6
make 4.4.1
diff --git a/Furyfile.yaml b/Furyfile.yaml
deleted file mode 100644
index 7d1557fc1..000000000
--- a/Furyfile.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) 2022 SIGHUP s.r.l All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
----
-versions:
- auth: v0.3.0
- aws: v4.2.0
- dr: v2.3.0
- ingress: v2.3.3
- logging: v3.4.1
- monitoring: v3.2.0
- opa: v1.12.0
- networking: v1.17.0
- tracing: v1.0.3
-
-bases:
- - name: auth
- - name: aws
- - name: dr
- - name: ingress
- - name: logging
- - name: monitoring
- - name: networking
- - name: opa
- - name: tracing
-
-modules:
- - name: aws
- - name: dr
- - name: ingress
diff --git a/Makefile b/Makefile
index 51ad403b4..2ed6b4009 100644
--- a/Makefile
+++ b/Makefile
@@ -52,10 +52,10 @@ lint-go:
.PHONY: tools-go
tools-go:
- @go install github.com/evanphx/json-patch/cmd/json-patch@v5.6.0
+ @go install github.com/evanphx/json-patch/v5/cmd/json-patch@v5.9.0
@go install github.com/google/addlicense@v1.1.1
- @go install mvdan.cc/gofumpt@v0.5.0
- @go install golang.org/x/tools/cmd/goimports@v0.9.3
+ @go install mvdan.cc/gofumpt@v0.7.0
+ @go install golang.org/x/tools/cmd/goimports@v0.26.0
@go install github.com/daixiang0/gci@v0.10.1
@go install github.com/momaek/formattag@v0.0.9
@go install github.com/santhosh-tekuri/jsonschema/cmd/jv@v0.4.0
@@ -94,6 +94,20 @@ generate-docs:
@md-gen gen --input schemas/public/kfddistribution-kfd-v1alpha2.json --output docs/schemas/kfddistribution-kfd-v1alpha2.md --overwrite --banner banners/kfddistribution.md
@md-gen gen --input schemas/public/ekscluster-kfd-v1alpha2.json --output docs/schemas/ekscluster-kfd-v1alpha2.md --overwrite --banner banners/ekscluster.md
+.PHONY: generate-np-diagrams
+generate-np-diagrams:
+ docker run --rm -v $(PWD)/docs/network-policies:/workdir minlag/mermaid-cli:latest -i "/workdir/overview.md" -o "/workdir/overview.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/auth:/workdir minlag/mermaid-cli:latest -i "/workdir/sso.md" -o "/workdir/sso.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/ingress:/workdir minlag/mermaid-cli:latest -i "/workdir/single.md" -o "/workdir/single.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/ingress:/workdir minlag/mermaid-cli:latest -i "/workdir/dual.md" -o "/workdir/dual.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/logging:/workdir minlag/mermaid-cli:latest -i "/workdir/loki.md" -o "/workdir/loki.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/logging:/workdir minlag/mermaid-cli:latest -i "/workdir/opensearch.md" -o "/workdir/opensearch.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/monitoring:/workdir minlag/mermaid-cli:latest -i "/workdir/mimir.md" -o "/workdir/mimir.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/monitoring:/workdir minlag/mermaid-cli:latest -i "/workdir/prometheus.md" -o "/workdir/prometheus.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/opa:/workdir minlag/mermaid-cli:latest -i "/workdir/gatekeeper.md" -o "/workdir/gatekeeper.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/opa:/workdir minlag/mermaid-cli:latest -i "/workdir/kyverno.md" -o "/workdir/kyverno.png" -w 2048 -H 1536 -b white
+ docker run --rm -v $(PWD)/docs/network-policies/modules/tracing:/workdir minlag/mermaid-cli:latest -i "/workdir/tempo.md" -o "/workdir/tempo.png" -w 2048 -H 1536 -b white
+
.PHONY: dump-private-schema
dump-private-schema:
@cat schemas/public/ekscluster-kfd-v1alpha2.json | \
diff --git a/README.md b/README.md
index 286838261..42a69eaa6 100644
--- a/README.md
+++ b/README.md
@@ -7,8 +7,8 @@
Kubernetes Fury Distribution (KFD) is a certified battle-tested Kubernetes distribution based purely on upstream Kubernetes.
-[](http://ci.sighup.io/sighupio/fury-distribution)
-[](https://github.com/sighupio/fury-distribution/releases/latest)
+[](http://ci.sighup.io/sighupio/fury-distribution)
+[](https://github.com/sighupio/fury-distribution/releases/latest)
[](https://kubernetes.slack.com/archives/C0154HYTAQH)
[](https://github.com/sighupio/fury-distribution/blob/main/LICENSE)
@@ -130,9 +130,9 @@ Current supported versions of KFD are:
| KFD Version | Kubernetes Version |
| :----------------------------------------------------------------------------: | :----------------: |
-| [`1.29.4`](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.4) | `1.29.x` |
-| [`1.28.4`](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.4) | `1.28.x` |
-| [`1.27.9`](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.8) | `1.27.x` |
+| [`1.30.0`](https://github.com/sighupio/fury-distribution/releases/tag/v1.30.0) | `1.30.x` |
+| [`1.29.5`](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.5) | `1.29.x` |
+| [`1.28.5`](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.5) | `1.28.x` |
Check the [compatibility matrix][compatibility-matrix] for additional information about previous releases of the Distribution and the compatibility with `furyctl`.
@@ -174,14 +174,14 @@ KFD is open-source software and it's released under the following [LICENSE](LICE
[dr-module]: https://github.com/sighupio/fury-kubernetes-dr
[opa-module]: https://github.com/sighupio/fury-kubernetes-opa
[auth-module]: https://github.com/sighupio/fury-kubernetes-auth
-[networking-version]: https://img.shields.io/badge/release-v1.17.0-blue
-[ingress-version]: https://img.shields.io/badge/release-v2.3.3-blue
-[logging-version]: https://img.shields.io/badge/release-v3.4.1-blue
-[monitoring-version]: https://img.shields.io/badge/release-v3.2.0-blue
-[tracing-version]: https://img.shields.io/badge/release-v1.0.3-blue
-[dr-version]: https://img.shields.io/badge/release-v2.3.0-blue
-[opa-version]: https://img.shields.io/badge/release-v1.12.0-blue
-[auth-version]: https://img.shields.io/badge/release-v0.3.0-blue
+[networking-version]: https://img.shields.io/badge/release-v2.0.0-blue
+[ingress-version]: https://img.shields.io/badge/release-v3.0.1-blue
+[logging-version]: https://img.shields.io/badge/release-v4.0.0-blue
+[monitoring-version]: https://img.shields.io/badge/release-v3.3.0-blue
+[tracing-version]: https://img.shields.io/badge/release-v1.1.0-blue
+[dr-version]: https://img.shields.io/badge/release-v3.0.0-blue
+[opa-version]: https://img.shields.io/badge/release-v1.13.0-blue
+[auth-version]: https://img.shields.io/badge/release-v0.4.0-blue
diff --git a/ROADMAP.md b/ROADMAP.md
index 54ef986bc..f5e0fb0a1 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -6,21 +6,27 @@ From 2024, development will focus on hardening the distribution security-wise, p
## Q1 2024
-- KFD 1.28.x release and release of the latest supported patch version for 1.27.x and 1.26.x, drop support for 1.25.x
-- furyctl 0.28.x release
-- Feature: Additional encryption parameters for ETCD on the OnPremises provider
-- New project release: Gangplank, a forked and updated version of Gangway
+- [x] KFD 1.28.x release and release of the latest supported patch version for 1.27.x and 1.26.x, drop support for 1.25.x
+- [x] furyctl 0.28.x release
+- [x] Feature: Additional encryption parameters for ETCD on the OnPremises provider
+- [x] New project release: Gangplank, a forked and updated version of Gangway
## Q2 2024
-- KFD 1.29.x release and release of the latest supported patch version for 1.28.x and 1.27.x, drop support for 1.26.x
-- furyctl 0.29.x release
-- Feature: Improved hardening for all the images used in the KFD distribution by default
-- Feature: Improved network policies for the KFD infrastructural components
+- [x] KFD 1.29.x release and release of the latest supported patch version for 1.28.x and 1.27.x, drop support for 1.26.x
+- [x] furyctl 0.29.x release
+- [ ] Feature: Improved hardening for all the images used in the KFD distribution by default
+- [ ] Feature: Improved network policies for the KFD infrastructural components
## H2 2024
-- KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x
-- furyctl 0.30.x release
-- Feature: Add support for secured container runtimes
-- Feature: Track dependencies provenance and dependencies signing
+- [x] KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x
+- [x] furyctl 0.30.x release
+- [ ] Feature: Add support for secured container runtimes
+- [ ] Feature: Track dependencies provenance and dependencies signing
+- [x] (from Q2 2024) Feature: Optional selection of improved hardened images used in the KFD distribution installation
+- [x] (from Q2 2024) Feature: Experimental network policies for the KFD infrastructural components on the OnPremises provider
+- [ ] KFD 1.31.x release
+- [ ] furyctl 0.31.x release
+
+
diff --git a/banners/ekscluster.md b/banners/ekscluster.md
index a66d70188..873a47e0c 100644
--- a/banners/ekscluster.md
+++ b/banners/ekscluster.md
@@ -2,5 +2,11 @@
This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service.
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+```bash
+furyctl create config --kind EKSCluster --version v1.30.0 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.
diff --git a/banners/kfddistribution.md b/banners/kfddistribution.md
index a44f13847..797d2678f 100644
--- a/banners/kfddistribution.md
+++ b/banners/kfddistribution.md
@@ -2,5 +2,11 @@
This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster.
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+```bash
+furyctl create config --kind KFDDistribution --version v1.30.0 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.
diff --git a/banners/onpremises.md b/banners/onpremises.md
index a8d8983dd..7f05c77c8 100644
--- a/banners/onpremises.md
+++ b/banners/onpremises.md
@@ -2,5 +2,11 @@
This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises.
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+```bash
+furyctl create config --kind OnPremises --version v1.30.0 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.
diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml
index 6c708be00..84d72930e 100644
--- a/defaults/ekscluster-kfd-v1alpha2.yaml
+++ b/defaults/ekscluster-kfd-v1alpha2.yaml
@@ -15,7 +15,7 @@ data:
relativeVendorPath: "../../vendor"
provider:
type: eks
-
+ networkPoliciesEnabled: false
# the module section will be used to fine tune each module behaviour and configuration
modules:
# ingress module configuration
@@ -47,7 +47,7 @@ data:
name: ""
create: true
# internal field, should be either the VPC ID taken from the kubernetes
- # phase or the ID of the created VPC in the Ifra phase
+ # phase or the ID of the created VPC in the Infra phase
vpcId: ""
# common configuration for nginx ingress controller
nginx:
@@ -253,6 +253,16 @@ data:
bucketName: velerobucket
schedules:
install: true
+ definitions:
+ manifests:
+ schedule: "*/15 * * * *"
+ ttl: "720h0m0s"
+ full:
+ schedule: "0 1 * * *"
+ ttl: "720h0m0s"
+ snapshotMoveData: false
+ snapshotController:
+ install: false
# auth module configuration
auth:
overrides:
diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml
index d0c790257..c943ae1df 100644
--- a/defaults/kfddistribution-kfd-v1alpha2.yaml
+++ b/defaults/kfddistribution-kfd-v1alpha2.yaml
@@ -15,7 +15,7 @@ data:
relativeVendorPath: "../../vendor"
provider:
type: none
-
+ networkPoliciesEnabled: false
# the module section will be used to fine tune each module behaviour and configuration
modules:
# ingress module configuration
@@ -240,6 +240,16 @@ data:
bucketName: velerobucket
schedules:
install: true
+ definitions:
+ manifests:
+ schedule: "*/15 * * * *"
+ ttl: "720h0m0s"
+ full:
+ schedule: "0 1 * * *"
+ ttl: "720h0m0s"
+ snapshotMoveData: false
+ snapshotController:
+ install: false
# auth module configuration
auth:
overrides:
diff --git a/defaults/onpremises-kfd-v1alpha2.yaml b/defaults/onpremises-kfd-v1alpha2.yaml
index f26ad1e6e..d2717c3ba 100644
--- a/defaults/onpremises-kfd-v1alpha2.yaml
+++ b/defaults/onpremises-kfd-v1alpha2.yaml
@@ -15,7 +15,7 @@ data:
relativeVendorPath: "../../vendor"
provider:
type: none
-
+ networkPoliciesEnabled: false
# the module section will be used to fine tune each module behaviour and configuration
modules:
# ingress module configuration
@@ -240,6 +240,16 @@ data:
bucketName: velerobucket
schedules:
install: true
+ definitions:
+ manifests:
+ schedule: "*/15 * * * *"
+ ttl: "720h0m0s"
+ full:
+ schedule: "0 1 * * *"
+ ttl: "720h0m0s"
+ snapshotMoveData: false
+ snapshotController:
+ install: false
# auth module configuration
auth:
overrides:
diff --git a/docs/network-policies/README.md b/docs/network-policies/README.md
new file mode 100644
index 000000000..5511fbb1e
--- /dev/null
+++ b/docs/network-policies/README.md
@@ -0,0 +1,24 @@
+# Network Policies Documentation
+
+This documentation describes all Network Policies of the KFD components for the OnPremises schema.
+
+## Modules
+- [Auth](modules/auth/README.md) - Pomerium SSO
+- [Ingress](modules/ingress/README.md) - Nginx (single/dual) + Cert-manager
+- [Logging](modules/logging/README.md) - OpenSearch/Loki
+- [Monitoring](modules/monitoring/README.md) - Prometheus/Mimir
+- [OPA](modules/opa/README.md) - Gatekeeper/Kyverno
+- [Tracing](modules/tracing/README.md) - Tempo
+
+## Common Patterns
+All namespaces include:
+- Default deny-all policy
+- DNS access to kube-dns
+- Prometheus metrics collection
+- Kubernetes API server access where needed
+
+## High Level Overview
+- [Overview](overview.md)
+
+## Instructions
+Generate the new Network Policies diagrams with `make generate-np-diagrams`.
\ No newline at end of file
diff --git a/docs/network-policies/modules/auth/README.md b/docs/network-policies/modules/auth/README.md
new file mode 100644
index 000000000..488d84b6b
--- /dev/null
+++ b/docs/network-policies/modules/auth/README.md
@@ -0,0 +1,16 @@
+# Auth Module Network Policies
+
+## Components
+- Pomerium
+
+## Namespaces
+- pomerium
+
+## Network Policies List
+- deny-all
+- all-egress-kube-dns
+- pomerium-ingress-nginx
+- pomerium-egress-all
+
+## Configurations
+- [SSO with Pomerium](sso.md)
diff --git a/docs/network-policies/modules/auth/sso.md b/docs/network-policies/modules/auth/sso.md
new file mode 100644
index 000000000..5d6b816f1
--- /dev/null
+++ b/docs/network-policies/modules/auth/sso.md
@@ -0,0 +1,53 @@
+# SSO with Pomerium
+
+```mermaid
+graph TD
+ %% Namespaces
+ subgraph ingress-nginx
+ nginx[Nginx Controller]
+ end
+
+ subgraph pomerium
+ pom[Pomerium
app: pomerium]
+ acme[ACME HTTP Solver
app: cert-manager]
+ end
+
+ subgraph monitoring
+ graf[Grafana]
+ prom[Prometheus]
+ am[Alertmanager]
+ minio_monitoring[MinIO]
+ end
+
+ subgraph logging
+ osd[OpenSearch Dashboards]
+ minio_logging[MinIO]
+ end
+
+ subgraph tracing
+ minio_tracing[MinIO]
+ end
+
+ subgraph gatekeeper-system
+ gpm[Gatekeeper Policy Manager]
+ end
+
+ %% External and K8s Core Components
+ dns[Kube DNS]
+ ext[External]
+
+ %% Edges
+ pom -->|"53/UDP"| dns
+ nginx -->|"8080/TCP"| pom
+ nginx -->|"8089/TCP"| acme
+ prom -->|"9090/TCP metrics"| pom
+ pom -->|"443/TCP"| ext
+ pom -->|"3000/TCP"| graf
+ pom -->|"9090/TCP"| prom
+ pom -->|"9093/TCP"| am
+ pom -->|"5601/TCP"| osd
+ pom -->|"9001/TCP"| minio_logging
+ pom -->|"9001/TCP"| minio_tracing
+ pom -->|"9001/TCP"| minio_monitoring
+ pom -->|"8080/TCP"| gpm
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/ingress/README.md b/docs/network-policies/modules/ingress/README.md
new file mode 100644
index 000000000..23eb467df
--- /dev/null
+++ b/docs/network-policies/modules/ingress/README.md
@@ -0,0 +1,35 @@
+# Ingress Module Network Policies
+
+## Components
+- Nginx Ingress Controller (single/dual mode)
+- Cert-manager
+- Forecastle
+
+## Namespaces
+- ingress-nginx
+- cert-manager
+
+## Network Policies List
+
+### Cert-manager
+- deny-all
+- all-egress-kube-dns
+- cert-manager-egress-kube-apiserver
+- cert-manager-webhook-ingress-kube-apiserver
+- cert-manager-egress-https
+- cert-manager-ingress-prometheus-metrics
+- acme-http-solver-ingress-lets-encrypt
+
+### Ingress-nginx
+- deny-all
+- all-egress-kube-dns
+- forecastle-ingress-nginx
+- forecastle-egress-kube-apiserver
+- nginx-egress-all
+- all-ingress-nginx
+- nginx-ingress-prometheus-metric
+- external-dns
+
+## Configurations
+- [Single Nginx](single.md)
+- [Dual Nginx](dual.md)
diff --git a/docs/network-policies/modules/ingress/dual.md b/docs/network-policies/modules/ingress/dual.md
new file mode 100644
index 000000000..6b988d41d
--- /dev/null
+++ b/docs/network-policies/modules/ingress/dual.md
@@ -0,0 +1,33 @@
+# Dual Nginx Configuration
+
+```mermaid
+graph TD
+ %% Namespaces
+ subgraph ingress-nginx
+ nginx[Nginx Controller
app: ingress]
+ fc[Forecastle
app: forecastle]
+ end
+
+ subgraph cert-manager
+ cm[Cert Manager
app: cert-manager]
+ cmw[Cert Manager Webhook]
+ end
+
+ %% External and K8s Core Components
+ dns[Kube DNS]
+ api[Kubernetes API]
+ prom[Prometheus]
+ ext[External ACME / Internet]
+
+ %% Edges
+ nginx & cm -->|"53/UDP"| dns
+ cm -->|"6443/TCP"| api
+ fc -->|"6443/TCP"| api
+ api -->|"10250/TCP"| cmw
+ prom -->|"10254/TCP"| nginx
+ prom -->|"9402/TCP"| cm
+ cm -->|"443,80/TCP"| ext
+ all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx
+ nginx -->|"egress: all"| all
+ nginx -->|"3000/TCP"| fc
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/ingress/single.md b/docs/network-policies/modules/ingress/single.md
new file mode 100644
index 000000000..b0f7b2054
--- /dev/null
+++ b/docs/network-policies/modules/ingress/single.md
@@ -0,0 +1,35 @@
+# Single Nginx Configuration
+
+```mermaid
+graph TD
+ %% Namespaces
+ subgraph ingress-nginx
+ nginx[Nginx Controller
app: ingress-nginx]
+ fc[Forecastle
app: forecastle]
+ edns[ExternalDNS
app: external-dns]
+ end
+
+ subgraph cert-manager
+ cm[Cert Manager
app: cert-manager]
+ cmw[Cert Manager Webhook]
+ end
+
+ %% External and K8s Core Components
+ dns[Kube DNS]
+ api[Kubernetes API]
+ prom[Prometheus]
+ ext[External / ACME]
+
+ %% Edges
+ nginx & cm -->|"53/UDP"| dns
+ cm -->|"6443/TCP"| api
+ fc -->|"6443/TCP"| api
+ api -->|"10250/TCP"| cmw
+ prom -->|"10254/TCP"| nginx
+ prom -->|"9402/TCP"| cm
+ cm -->|"443,80/TCP"| ext
+ all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx
+ nginx -->|"egress: all"| all
+ nginx -->|"3000/TCP"| fc
+ edns --> |"egress: all"| ext
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/logging/README.md b/docs/network-policies/modules/logging/README.md
new file mode 100644
index 000000000..df072997e
--- /dev/null
+++ b/docs/network-policies/modules/logging/README.md
@@ -0,0 +1,60 @@
+# Logging Module Network Policies
+
+## Components
+
+- OpenSearch Stack
+- Loki Stack
+
+## Namespaces
+
+- logging
+
+## Network Policies List
+
+### Common Policies
+
+- deny-all
+- all-egress-kube-dns
+- event-tailer-egress-kube-apiserver
+- fluentd-egress-all
+- fluentbit-egress-fluentd
+- fluentbit-egress-kube-apiserver
+- fluentbit-ingress-prometheus-metrics
+- logging-operator-egress-kube-apiserver
+
+### OpenSearch Stack
+
+- fluentd-ingress-fluentbit
+- fluentd-ingress-prometheus-metrics
+- opensearch-discovery
+- opensearch-ingress-dashboards
+- opensearch-ingress-fluentd
+- opensearch-ingress-prometheus-metrics
+- opensearch-ingress-jobs
+- opensearch-dashboards-egress-opensearch
+- opensearch-dashboards-ingress-nginx
+- opensearch-dashboards-ingress-jobs
+- jobs-egress-opensearch
+- jobs-egress-opensearch-dashboards
+
+### Loki Stack
+
+- loki-distributed-ingress-fluentd
+- loki-distributed-ingress-grafana
+- loki-distributed-ingress-prometheus-metrics
+- loki-distributed-discovery
+- loki-distributed-egress-all
+
+### MinIO
+
+- minio-ingress-namespace
+- minio-buckets-setup-egress-kube-apiserver
+- minio-buckets-setup-egress-minio
+- minio-ingress-prometheus-metrics
+- minio-ingress-nginx
+- minio-egress-https
+
+## Configurations
+
+- [OpenSearch Stack](opensearch.md)
+- [Loki Stack](loki.md)
diff --git a/docs/network-policies/modules/logging/loki.md b/docs/network-policies/modules/logging/loki.md
new file mode 100644
index 000000000..1b87f37ce
--- /dev/null
+++ b/docs/network-policies/modules/logging/loki.md
@@ -0,0 +1,52 @@
+# Loki Stack Configuration
+
+```mermaid
+graph TD
+ %% Namespaces
+ subgraph logging
+ fb[Fluentbit
app.kubernetes.io/name: fluentbit]
+ fd[Fluentd
app.kubernetes.io/name: fluentd]
+ loki_gateway[Loki Gateway
app.kubernetes.io/component: gateway]
+ loki_compactor[Loki Compactor
app.kubernetes.io/component: compactor]
+ loki_distributor[Loki Distributor
app.kubernetes.io/component: distributor]
+ loki_ingester[Loki Ingester
app.kubernetes.io/component: ingester]
+ loki_querier[Loki Querier
app.kubernetes.io/component: querier]
+ loki_query_frontend[Loki Query Frontend
app.kubernetes.io/component: query-frontend]
+ minio[MinIO
app: minio]
+ bucket[MinIO Bucket Setup
app: minio-logging-buckets-setup]
+ end
+
+ subgraph monitoring
+ prom[Prometheus]
+ graf[Grafana]
+ end
+
+ pom[Pomerium]
+
+ %% External and K8s Core Components
+ api[Kubernetes API]
+ ext[External]
+ dns[Kube DNS]
+
+ %% Edges
+ logging -->|"53/UDP"| dns
+ bucket -->|"6443/TCP"| api
+ fb -->|"24240/TCP"| fd
+ fd -->|"8080/TCP"| loki_gateway
+ prom -->|"3100/TCP"| loki_gateway
+ graf -->|"8080/TCP"| loki_gateway
+ prom -->|"2020/TCP"| fb
+ fb -->|"6443/TCP"| api
+ loki_query_frontend -->|"loki-discovery
9095,7946,3100/TCP"| loki_distributor
+ loki_distributor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester
+ loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester
+ loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_query_frontend
+ loki_compactor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester
+ loki_compactor -->|"egress: all"| minio
+ loki_ingester -->|"egress: all"| minio
+ loki_querier -->|"egress: all"| minio
+ bucket -->|"9000/TCP"| minio
+ minio -->|"443/TCP"| ext
+ pom -->|"9001/TCP"| minio
+ minio -->|"9000/TCP"| logging
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/logging/opensearch.md b/docs/network-policies/modules/logging/opensearch.md
new file mode 100644
index 000000000..5cf5727eb
--- /dev/null
+++ b/docs/network-policies/modules/logging/opensearch.md
@@ -0,0 +1,48 @@
+# OpenSearch Stack Configuration
+
+```mermaid
+graph TD
+ %% Namespace
+ subgraph logging
+ fb[Fluentbit
app.kubernetes.io/name: fluentbit]
+ fd[Fluentd
app.kubernetes.io/name: fluentd]
+ os[OpenSearch
app.kubernetes.io/name: opensearch]
+ osd[OpenSearch Dashboards
app: opensearch-dashboards]
+ minio[MinIO
app: minio]
+ bucket[MinIO Bucket Setup
app: minio-logging-buckets-setup]
+ op[Logging Operator
app.kubernetes.io/name: logging-operator]
+ et[Event Tailer
app.kubernetes.io/name: event-tailer]
+ job[OpenSearch Jobs]
+ end
+
+ %% External and K8s Core Components
+ api[Kubernetes API]
+ ext[External]
+ prom[Prometheus]
+ pom[Pomerium]
+ nginx[Nginx]
+ dns[Kube DNS]
+
+ %% Edges
+ logging --->|"53/UDP,TCP"| dns
+ fb -->|"6443/TCP"| api
+ et -->|"6443/TCP"| api
+ op -->|"6443/TCP"| api
+ bucket -->|"6443/TCP"| api
+ fb -->|"24240/TCP"| fd
+ fd -->|"egress: all"| os
+ osd -->|"9200/TCP"| os
+ pom -->|"5601/TCP"| osd
+ job -->|"5601/TCP"| osd
+ job -->|"9200/TCP"| os
+ prom -->|"2020/TCP"| fb
+ prom -->|"24231/TCP"| fd
+ prom -->|"9108/TCP"| os
+ prom -->|"9000/TCP"| minio
+ bucket -->|"9000/TCP"| minio
+ minio -->|"443/TCP"| ext
+ pom -->|"9001/TCP"| minio
+ logging -->|"9000/TCP"| minio
+ nginx -->|"9001/TCP"| minio
+ nginx -->|"5601/TCP"| osd
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/monitoring/README.md b/docs/network-policies/modules/monitoring/README.md
new file mode 100644
index 000000000..acf6e419e
--- /dev/null
+++ b/docs/network-policies/modules/monitoring/README.md
@@ -0,0 +1,55 @@
+# Monitoring Module Network Policies
+
+## Components
+- Prometheus Stack
+- Mimir Stack
+
+## Namespaces
+- monitoring
+
+## Network Policies List
+
+### Common Policies
+- deny-all
+- all-egress-kube-dns
+- alertmanager-main
+- alertmanager-ingress-nginx
+- blackbox-exporter
+- grafana
+- grafana-egress-tempo-gateway
+- grafana-ingress-nginx
+- kube-state-metrics
+- node-exporter
+- prometheus-ingress-nginx
+- prometheus-adapter
+- prometheus-ingress-prometheus-adapter
+- prometheus-operator
+- x509-exporter-egress-kube-apiserver
+- x509-exporter-ingress-prometheus-metrics
+- kube-state-metrics
+
+### MinIO
+- minio-ingress-namespace
+- minio-buckets-setup-egress-kube-apiserver
+- minio-buckets-setup-egress-minio
+- minio-ingress-prometheus-metrics
+- minio-monitoring-egress-all
+
+### Prometheus specific
+- prometheus-k8s
+- prometheus-egress-minio
+- prometheus-egress-kube-apiserver
+
+### Mimir specific
+- mimir-distributed-discovery
+- mimir-distributed-ingress-prometheus-metrics
+- mimir-gateway-ingress-grafana
+- mimir-querier-egress-https
+- mimir-ingester-egress-https
+- mimir-distributed-egress-minio (when using MinIO)
+- mimir-distributed-egress-all (when not using MinIO)
+
+## Configurations
+- [Prometheus Stack](prometheus.md)
+- [Mimir Stack](mimir.md)
+
diff --git a/docs/network-policies/modules/monitoring/mimir.md b/docs/network-policies/modules/monitoring/mimir.md
new file mode 100644
index 000000000..5619dbfe4
--- /dev/null
+++ b/docs/network-policies/modules/monitoring/mimir.md
@@ -0,0 +1,53 @@
+# Mimir Stack Configuration
+
+```mermaid
+graph TD
+ %% Namespace
+ subgraph monitoring
+ gateway[Mimir Gateway
app.kubernetes.io/component: gateway]
+ distributor[Mimir Distributor
app.kubernetes.io/component: distributor]
+ ingester[Mimir Ingester
app.kubernetes.io/component: ingester]
+ querier[Mimir Querier
app.kubernetes.io/component: querier]
+ qfront[Mimir Query Frontend
app.kubernetes.io/component: query-frontend]
+ qsched[Mimir Query Scheduler
app.kubernetes.io/component: query-scheduler]
+ store[Mimir Store Gateway
app.kubernetes.io/component: store-gateway]
+ compactor[Mimir Compactor
app.kubernetes.io/component: compactor]
+ grafana[Grafana
app.kubernetes.io/name: grafana]
+ prom[Prometheus
app.kubernetes.io/name: prometheus]
+ am[Alertmanager
app.kubernetes.io/component: alert-router]
+ bb[Blackbox Exporter
app.kubernetes.io/name: blackbox-exporter]
+ ksm[Kube State Metrics
app.kubernetes.io/name: kube-state-metrics]
+ ne[Node Exporter
app.kubernetes.io/name: node-exporter]
+ x509[x509 Exporter
app: x509-certificate-exporter]
+ minio[MinIO
app: minio]
+ bucket[MinIO Bucket Setup
app: minio-monitoring-buckets-setup]
+ end
+
+ %% External and K8s Core Components
+ api[Kubernetes API]
+ dns[Kube DNS]
+
+ %% Edges
+ monitoring -->|"53/UDP,TCP"| dns
+ bucket -->|"9000/TCP"| minio
+ qfront -->|"mimir-discovery
9095,7946,8080/TCP"| qsched
+ qfront -->|"mimir-discovery
9095,7946,8080/TCP"| querier
+ gateway -->|"mimir-discovery
9095,7946,8080/TCP"| distributor
+ distributor -->|"mimir-discovery
9095,7946,8080/TCP"| ingester
+ qsched -->|"mimir-discovery
9095,7946,8080/TCP"| querier
+ querier -->|"mimir-discovery
9095,7946,8080/TCP"| store
+ querier -->|"mimir-discovery
9095,7946,8080/TCP"| ingester
+ store -->|"mimir-discovery
9095,7946,8080/TCP"| compactor
+ compactor -->|"mimir-discovery
9095,7946,8080/TCP"| store
+ ingester & store & compactor -->|"9000/TCP"| minio
+ grafana -->|"8080/TCP"| gateway
+ prom -->|"8080/TCP"| distributor
+ prom -->|"9115,19115/TCP"| bb
+ prom -->|"8443,9443/TCP"| ksm
+ prom -->|"9100/TCP"| ne
+ prom -->|"9793/TCP"| x509
+ prom -->|"9093,8080/TCP"| am
+ pom[Pomerium] -->|"3000/TCP"| grafana
+ pom -->|"9093/TCP"| am
+ x509 -->|"6443/TCP"| api
+```
\ No newline at end of file
diff --git a/docs/network-policies/modules/monitoring/prometheus.md b/docs/network-policies/modules/monitoring/prometheus.md
new file mode 100644
index 000000000..f05457035
--- /dev/null
+++ b/docs/network-policies/modules/monitoring/prometheus.md
@@ -0,0 +1,43 @@
+# Prometheus Stack Configuration
+
+```mermaid
+graph TD
+ %% Namespace
+ subgraph monitoring
+ prom[Prometheus
app.kubernetes.io/name: prometheus]
+ grafana[Grafana
app.kubernetes.io/name: grafana]
+ am[Alertmanager
app.kubernetes.io/name: alertmanager]
+ bb[Blackbox Exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/component: exporter]
+ ksm[Kube State Metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/component: exporter]
+ ne[Node Exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/component: exporter]
+ pa[Prometheus Adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/component: metrics-adapter]
+ po[Prometheus Operator
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/component: controller]
+ minio[MinIO
app: minio]
+ bucket[MinIO Bucket Setup
app: minio-monitoring-buckets-setup]
+ x509[x509 Exporter
app: x509-certificate-exporter]
+ end
+
+ %% External and K8s Core Components
+ api[Kubernetes API]
+ dns[Kube DNS]
+ pom["Pomerium"]
+
+ %% Edges
+ monitoring -->|"53/UDP,TCP"| dns
+ bucket -->|"9000/TCP"| minio
+ prom -->|"6443,8405/TCP"| api
+ prom -->|"9000/TCP"| minio
+ prom -->|"9115,19115/TCP"| bb
+ prom -->|"8443,9443/TCP"| ksm
+ prom -->|"9100/TCP"| ne
+ prom -->|"8443/TCP"| po
+ prom -->|"9793/TCP"| x509
+ prom & am & bb & grafana & ksm & ne & pa & po -->|"egress: all"| all[All Namespaces]
+ pa -->|"9090/TCP"| prom
+ grafana -->|"9090/TCP"| prom
+ prom -->|"9093,8080/TCP"| am
+ pom -->|"9093/TCP"| am
+ prom -->|"3000/TCP"| grafana
+ pom -->|"3000/TCP"| grafana
+ x509 -->|"6443/TCP"| api
+```
diff --git a/docs/network-policies/modules/opa/README.md b/docs/network-policies/modules/opa/README.md
new file mode 100644
index 000000000..a43db5d2a
--- /dev/null
+++ b/docs/network-policies/modules/opa/README.md
@@ -0,0 +1,36 @@
+# OPA Module Network Policies
+
+## Components
+- Gatekeeper + Gatekeeper Policy Manager
+- Kyverno
+
+## Namespaces
+- gatekeeper-system (when using Gatekeeper)
+- kyverno (when using Kyverno)
+
+## Network Policies List
+
+### Gatekeeper
+- deny-all
+- all-egress-dns
+- audit-controller-egress-kube-apiserver
+- controller-manager-egress-kube-apiserver
+- controller-manager-ingress-kube-apiserver
+- gpm-egress-kube-apiserver
+- gpm-ingress-pomerium
+- gatekeeper-ingress-prometheus-metrics
+
+### Kyverno
+- deny-all
+- all-egress-dns
+- kyverno-admission-egress-kube-apiserver
+- kyverno-admission-ingress-nodes
+- kyverno-background-egress-kube-apiserver
+- kyverno-reports-egress-kube-apiserver
+- kyverno-cleanup-egress-kube-apiserver
+- kyverno-cleanup-reports-egress-kube-apiserver
+
+## Configurations
+- [Gatekeeper](gatekeeper.md)
+- [Kyverno](kyverno.md)
+
diff --git a/docs/network-policies/modules/opa/gatekeeper.md b/docs/network-policies/modules/opa/gatekeeper.md
new file mode 100644
index 000000000..d3d049f7d
--- /dev/null
+++ b/docs/network-policies/modules/opa/gatekeeper.md
@@ -0,0 +1,26 @@
+# Gatekeeper Configuration
+
+```mermaid
+graph TD
+ %% Namespace
+ subgraph gatekeeper-system
+ audit[Audit Controller
control-plane: audit-controller]
+ cm[Controller Manager
control-plane: controller-manager]
+ gpm[Policy Manager
app: gatekeeper-policy-manager]
+ end
+
+ %% External and K8s Core Components
+ api[Kubernetes API]
+ dns[Kube DNS]
+ prom[Prometheus]
+ pom[Pomerium]
+
+ %% Edges
+ audit & cm -->|"53/UDP"| dns
+ audit -->|"6443/TCP"| api
+ cm -->|"6443/TCP"| api
+ gpm -->|"6443/TCP"| api
+ pom -->|"8080/TCP"| gpm
+ prom -->|"8888/TCP"| audit & cm
+ api -->|"8443,443/TCP"| cm
+```
diff --git a/docs/network-policies/modules/opa/kyverno.md b/docs/network-policies/modules/opa/kyverno.md
new file mode 100644
index 000000000..602c40dc8
--- /dev/null
+++ b/docs/network-policies/modules/opa/kyverno.md
@@ -0,0 +1,27 @@
+# Kyverno Configuration
+
+```mermaid
+graph TD
+ %% Namespace
+ subgraph kyverno
+ admission[Admission Controller
component: admission-controller]
+ background[Background Controller
component: background-controller]
+ reports[Reports Controller
component: reports-controller]
+ cleanup[Cleanup Controller
component: cleanup-controller]
+ end
+
+ %% External and K8s Core Components
+ dns[Kube DNS]
+ api[Kubernetes API]
+
+ %% Edges
+ admission -->|"53/UDP"| dns
+ background -->|"53/UDP"| dns
+ reports -->|"53/UDP"| dns
+ cleanup -->|"53/UDP"| dns
+ admission -->|"6443/TCP"| api
+ background -->|"6443/TCP"| api
+ reports -->|"6443/TCP"| api
+ cleanup -->|"6443/TCP"| api
+ all[All Namespaces] -->|"9443/TCP"| admission
+```
diff --git a/docs/network-policies/modules/tracing/README.md b/docs/network-policies/modules/tracing/README.md
new file mode 100644
index 000000000..4db75de3b
--- /dev/null
+++ b/docs/network-policies/modules/tracing/README.md
@@ -0,0 +1,32 @@
+# Tracing Module Network Policies
+
+## Components
+- Tempo
+
+## Namespaces
+- tracing
+
+## Network Policies List
+- deny-all
+- all-egress-kube-dns
+- tempo-distributed-discovery
+- tempo-distributed-ingress-prometheus-metrics
+- tempo-gateway-ingress-grafana
+- all-egress-tempo-distributor
+- tempo-distributor-ingress-traces
+- tempo-components-egress-memcached
+- memcached-ingress-querier
+- tempo-components-egress-https
+- tempo-distributed-egress-minio (when using MinIO)
+- tempo-distributed-egress-all (when not using MinIO)
+
+### MinIO
+- minio-ingress-namespace
+- minio-buckets-setup-egress-kube-apiserver
+- minio-buckets-setup-egress-minio
+- minio-ingress-prometheus-metrics
+- minio-ingress-pomerium
+- minio-egress-https
+
+## Configurations
+- [Tempo](tempo.md)
diff --git a/docs/network-policies/modules/tracing/tempo.md b/docs/network-policies/modules/tracing/tempo.md
new file mode 100644
index 000000000..0ca46d1ba
--- /dev/null
+++ b/docs/network-policies/modules/tracing/tempo.md
@@ -0,0 +1,42 @@
+# Tempo Configuration
+
+```mermaid
+graph TD
+ %% Namespaces
+ subgraph tracing
+ gateway[Tempo Gateway
component: gateway]
+ dist[Tempo Distributor
component: distributor]
+ query[Tempo Querier
component: querier]
+ mem[Memcached
component: memcached]
+ minio[MinIO
app: minio]
+ bucket[MinIO Bucket Setup
app: minio-tracing-buckets-setup]
+ end
+
+ subgraph monitoring
+ graf[Grafana]
+ prom[Prometheus]
+ end
+
+ subgraph pomerium
+ pom[Pomerium]
+ end
+
+ allns[All Namespaces]
+
+ %% External and K8s Core Components
+ dns[Kube DNS]
+ ext[External]
+
+ %% Edges
+ gateway & dist & query -->|"53/UDP"| dns
+ gateway -->|"9095,7946,3100/TCP"| dist & query
+ dist -->|"9095,7946,3100/TCP"| query
+ query -->|"11211/TCP"| mem
+ allns -->|"4317/TCP"| dist
+ graf -->|"8080/TCP"| gateway
+ prom -->|"3100/TCP"| gateway & dist & query
+ pom -->|"9001/TCP"| minio
+ query -->|"9000/TCP"| minio
+ minio -->|"443/TCP"| ext
+ bucket -->|"9000/TCP"| minio
+```
\ No newline at end of file
diff --git a/docs/network-policies/overview.md b/docs/network-policies/overview.md
new file mode 100644
index 000000000..93fbc5033
--- /dev/null
+++ b/docs/network-policies/overview.md
@@ -0,0 +1,36 @@
+# KFD Network Policies Overview
+
+```mermaid
+graph TD
+ subgraph kfd[KFD Core Modules]
+ ingress[Ingress
Nginx + Cert-manager]
+ auth[Auth
Pomerium]
+ mon[Monitoring
Prometheus/Mimir]
+ log[Logging
Opensearch/Loki]
+ tracing[Tracing
Tempo]
+ opa[OPA
Gatekeeper/Kyverno]
+ end
+
+ %% K8s Core Components
+ dns[KubeDNS]
+ api[Kubernetes API]
+ ext[External]
+
+ %% Edges
+ kfd --->|"53/UDP"| dns
+ kfd -->|"6443/TCP"| api
+ ingress -->|"8080/TCP"| auth
+ auth -->|"auth proxy"| mon & log & tracing & opa
+ auth -->|"443/TCP"| ext
+ mon -->|"metrics"| all
+ mon -->|"metrics"| auth
+ mon -->|"metrics"| ingress
+ mon -->|"metrics"| log
+ mon -->|"metrics"| tracing
+ mon -->|"metrics"| opa
+ all[All Namespaces] -->|"logs"| log
+ all -->|"traces"| tracing
+
+
+
+```
\ No newline at end of file
diff --git a/docs/releases/v1.1.0.md b/docs/releases/legacy/v1.1.0.md
similarity index 100%
rename from docs/releases/v1.1.0.md
rename to docs/releases/legacy/v1.1.0.md
diff --git a/docs/releases/v1.2.0.md b/docs/releases/legacy/v1.2.0.md
similarity index 100%
rename from docs/releases/v1.2.0.md
rename to docs/releases/legacy/v1.2.0.md
diff --git a/docs/releases/v1.3.0.md b/docs/releases/legacy/v1.3.0.md
similarity index 100%
rename from docs/releases/v1.3.0.md
rename to docs/releases/legacy/v1.3.0.md
diff --git a/docs/releases/v1.4.0.md b/docs/releases/legacy/v1.4.0.md
similarity index 100%
rename from docs/releases/v1.4.0.md
rename to docs/releases/legacy/v1.4.0.md
diff --git a/docs/releases/v1.5.0.md b/docs/releases/legacy/v1.5.0.md
similarity index 100%
rename from docs/releases/v1.5.0.md
rename to docs/releases/legacy/v1.5.0.md
diff --git a/docs/releases/v1.5.1.md b/docs/releases/legacy/v1.5.1.md
similarity index 100%
rename from docs/releases/v1.5.1.md
rename to docs/releases/legacy/v1.5.1.md
diff --git a/docs/releases/v1.6.0.md b/docs/releases/legacy/v1.6.0.md
similarity index 100%
rename from docs/releases/v1.6.0.md
rename to docs/releases/legacy/v1.6.0.md
diff --git a/docs/releases/v1.7.0.md b/docs/releases/legacy/v1.7.0.md
similarity index 100%
rename from docs/releases/v1.7.0.md
rename to docs/releases/legacy/v1.7.0.md
diff --git a/docs/releases/v1.7.1.md b/docs/releases/legacy/v1.7.1.md
similarity index 100%
rename from docs/releases/v1.7.1.md
rename to docs/releases/legacy/v1.7.1.md
diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md
new file mode 100644
index 000000000..bf96082d8
--- /dev/null
+++ b/docs/releases/v1.30.0.md
@@ -0,0 +1,206 @@
+# Kubernetes Fury Distribution Release v1.30.0
+
+Welcome to KFD release `v1.30.0`. This is the first release of KFD supporting Kubernetes 1.30.
+
+The distribution is maintained with ❤️ by the team [SIGHUP](https://sighup.io/).
+
+## New Features since `v1.29.4`
+
+### Installer Updates
+
+- [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) 📦 installer: [**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6)
+  - Updated etcd default version to 3.5.15
+  - Updated HAProxy version to 3.0 LTS
+ - Updated containerd default version to 1.7.23
+ - Added support for Kubernetes versions 1.30.6, 1.29.10 and 1.28.15
+- [eks](https://github.com/sighupio/fury-eks-installer) 📦 installer: [**v3.2.0**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.2.0)
+ - Introduced AMI selection type: `alinux2023` and `alinux2`
+ - Fixed eks-managed nodepool node labels
+
+### Module updates
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) 📦 core module: [**v2.0.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v2.0.0)
+ - Updated Tigera operator to v1.36.1 (that includes calico v3.29.0)
+ - Updated Cilium to v1.16.3
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) 📦 core module: [**v3.3.0**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v3.3.0)
+ - Updated blackbox-exporter to v0.25.0
+ - Updated grafana to v11.3.0
+ - Updated kube-rbac-proxy to v0.18.1
+ - Updated kube-state-metrics to v2.13.0
+ - Updated node-exporter to v1.8.2
+ - Updated prometheus-adapter to v0.12.0
+ - Updated prometheus-operator to v0.76.2
+ - Updated prometheus to v2.54.1
+ - Updated x509-exporter to v3.17.0
+ - Updated mimir to v2.14.0
+ - Updated minio to version RELEASE.2024-10-13T13-34-11Z
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) 📦 core module: [**v4.0.0**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v4.0.0)
+ - Updated opensearch and opensearch-dashboards to v2.17.1
+ - Updated logging-operator to v4.10.0
+ - Updated loki to v2.9.10
+ - Updated minio to version RELEASE.2024-10-13T13-34-11Z
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) 📦 core module: [**v3.0.1**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v3.0.1)
+ - Updated cert-manager to v1.16.1
+ - Updated external-dns to v0.15.0
+ - Updated forecastle to v1.0.145
+ - Updated nginx to v1.11.3
+- [auth](https://github.com/sighupio/fury-kubernetes-auth) 📦 core module: [**v0.4.0**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.4.0)
+ - Updated dex to v2.41.1
+ - Updated pomerium to v0.27.1
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) 📦 core module: [**v3.0.0**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v3.0.0)
+ - Updated velero to v1.15.0
+ - Updated all velero plugins to v1.11.0
+ - Added snapshot-controller v8.0.1
+- [tracing](https://github.com/sighupio/fury-kubernetes-tracing) 📦 core module: [**v1.1.0**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/v1.1.0)
+ - Updated tempo to v2.6.0
+ - Updated minio to version RELEASE.2024-10-13T13-34-11Z
+- [opa](https://github.com/sighupio/fury-kubernetes-opa) 📦 core module: [**v1.13.0**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.13.0)
+ - Updated gatekeeper to v3.17.1
+ - Updated gatekeeper-policy-manager to v1.0.13
+ - Updated kyverno to v1.12.6
+- [aws](https://github.com/sighupio/fury-kubernetes-aws) 📦 module: [**v4.3.0**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/v4.3.0)
+ - Updated cluster-autoscaler to v1.30.0
+ - Updated snapshot-controller to v8.1.0
+ - Updated aws-load-balancer-controller to v2.10.0
+ - Updated node-termination-handler to v1.22.0
+
+## Breaking changes 💔
+
+- **Loki store and schema change**: A new store and schema has been introduced in order to improve efficiency, speed and scalability of Loki clusters. See "[New features](#new-features-)" below for more details.
+- **DR schema change**: A new format for the schedule customization has been introduced to improve the usability. See "[New Features](#new-features-)" section below for more details.
+- **Kyverno validation failure action**: Kyverno has deprecated `audit` and `enforce` as valid options for the `validationFailureAction`, valid options are now `Audit` and `Enforce`, in title case. Adjust your `.spec.distribution.modules.policy.kyverno.validationFailureAction` value accordingly.
+
+## New features 🚀
+
+- **New option for Logging**: Loki's configuration has been extended to accommodate a new `tsdbStartDate` **required** option to allow a migration towards TSDB and schema v13 storage (note: **this is a breaking change**):
+
+ ```yaml
+ ...
+ spec:
+ distribution:
+ modules:
+ logging:
+ loki:
+ tsdbStartDate: "2024-11-18"
+ ...
+ ```
+
+ - `tsdbStartDate` (**required**): a string in `ISO 8601` date format that represents the day starting from which Loki will record logs with the new store and schema.
+
+ โน๏ธ **Note**: Loki will assume the start of the day on the UTC midnight of the specified day.
+
+- **Improved configurable schedules for DR backups**: the schedule configuration has been updated to enhance the usability of schedule customization (note: **this is a breaking change**):
+
+ ```yaml
+ ...
+ spec:
+ distribution:
+ modules:
+ dr:
+ velero:
+ schedules:
+ install: true
+ definitions:
+ manifests:
+ schedule: "*/15 * * * *"
+ ttl: "720h0m0s"
+ full:
+ schedule: "0 1 * * *"
+ ttl: "720h0m0s"
+ snapshotMoveData: false
+ ...
+ ```
+
+- **DR snapshotMoveData options for full schedule**: a new parameter has been introduced in the velero `full` schedule to enable the `snapshotMoveData` feature. This feature allows data captured from a snapshot to be copied to the object storage location. **Important**: Setting this parameter to `true` will cause Velero to upload all data from the snapshotted volumes to S3 using Kopia. While backups are deduplicated, significant storage usage is still expected. To enable this use the following parameter in the full schedule configuration:
+
+ ```yaml
+ ...
+ spec:
+ distribution:
+ modules:
+ dr:
+ velero:
+ schedules:
+ install: true
+ definitions:
+ full:
+ snapshotMoveData: true
+ ...
+ ```
+
+General example to enable Volume Snapshotting on rook-ceph (from our storage add-on module):
+
+ ```yaml
+ apiVersion: snapshot.storage.k8s.io/v1
+ kind: VolumeSnapshotClass
+ metadata:
+ name: velero-snapclass
+ labels:
+ velero.io/csi-volumesnapshot-class: "true"
+ driver: rook-ceph.rbd.csi.ceph.com
+ parameters:
+ clusterID: rook-ceph
+ csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
+ deletionPolicy: Retain
+ ```
+
+`deletionPolicy: Retain` is important because if the volume snapshot is deleted from the namespace, the cluster wide `volumesnapshotcontent` CR will be preserved, maintaining the snapshot on the storage that the cluster is using.
+
+**NOTE**: For EKSCluster provider, a default VolumeSnapshotClass is created automatically.
+
+- **DR optional snapshot-controller installation**: To leverage VolumeSnapshots on the OnPremises and KFDDistribution providers, a new option on velero has been added to install the snapshot-controller component. Before activating this parameter make sure that in your cluster there is not another snapshot-controller component deployed. By default this parameter is `false`.
+
+ ```yaml
+ ...
+ spec:
+ distribution:
+ modules:
+ dr:
+ velero:
+ snapshotController:
+ install: true
+ ...
+ ```
+
+- **Prometheus ScrapeConfigs**: the Monitoring module now enables by default the `scrapeConfig` CRDs from the Prometheus Operator. All the scrapeConfig objects present in the cluster will now be detected by the operator. `ScrapeConfig` objects are used to instruct Prometheus to scrape specific endpoints that could be outside the cluster.
+
+- **Components Hardening**: we hardened the security context of several components, improving the out-of-the-box security of the distribution.
+
+- **On-premises minimal clusters**: it is now possible to create clusters with only control-plane nodes, for minimal clusters installations that need to handle minimal workloads.
+
+- **Helm Plugins**: Helm plugins now allow disabling validation at installation time with the `disableValidationOnInstall` option. This can be useful when installing Helm charts that fail the diff step on a first installation, for example.
+
+- **Network Policies** (experimental 🧪): a new experimental feature is introduced in this version. You can now enable the installation of network policies that will restrict the traffic across all the infrastructural namespaces of KFD to just the access needed for its proper functioning, denying the rest and improving the overall security of the cluster. This experimental feature is only available in OnPremises clusters at the moment. Read more in the [Pull Request](https://github.com/sighupio/fury-distribution/pull/302) introducing the feature and in the [related documentation](https://github.com/sighupio/fury-distribution/tree/main/docs/network-policies).
+
+- **Global CVE patched images for core modules**: This distribution version includes images that have been patched for OS vulnerabilities (CVE). To use these patched images, select the following option:
+ ```yaml
+ ...
+ spec:
+ distribution:
+ common:
+ registry: registry.sighup.io/fury-secured
+ ...
+ ```
+
+
+## Fixes 🐛
+
+- Improved Configuration Schema documentation: documentation for the configuration schemas was lacking, we greatly improved the quality and quantity of the documentation regarding each option in the schemas, for all the configuration kinds (OnPremises, EKSCluster, KFDDistribution).
+- [[#264](https://github.com/sighupio/fury-distribution/pull/264)] Hubble UI: it is now shown in the right group in the Directory.
+- [[#277](https://github.com/sighupio/fury-distribution/pull/277)] Hubble UI: make it work when auth type is SSO.
+- [[#275](https://github.com/sighupio/fury-distribution/pull/275)] On-premises: use the `org` parameter for additional created users, it was being ignored before.
+- [[#279](https://github.com/sighupio/fury-distribution/pull/279)] Monitoring: don't install x509 data plane on EKS clusters because it is not needed and triggers false alerts.
+- [[#280](https://github.com/sighupio/fury-distribution/pull/280)] Migrations: fix migration of Auth type from `sso` to `basicAuth` and vice versa.
+- [[#281](https://github.com/sighupio/fury-distribution/pull/281)] Migrations: some ingresses were not being deleted when migrating to Ingress type `none`.
+- [[#281](https://github.com/sighupio/fury-distribution/pull/281)] Ingress: don't create TLS secret when ingress type is `none`.
+- [[#283](https://github.com/sighupio/fury-distribution/pull/283)] EKS schema validation: fix DNS validation depending on if nginx is single, dual or none.
+- [[#291](https://github.com/sighupio/fury-distribution/pull/291)] Monitoring: `minio-monitoring` ingress is now working when SSO is enabled.
+- [[#291](https://github.com/sighupio/fury-distribution/pull/291)] Tracing: `minio-tracing` ingress is now created when Logging type is `none` and `auth.type` is `sso`.
+- [[#293](https://github.com/sighupio/fury-distribution/pull/293)] Monitoring migrations: remove `minio-monitoring` ingress when migrating monitoring type from `mimir` to `none`.
+- [[#301](https://github.com/sighupio/fury-distribution/pull/301)] Migrations: fix an error on the concatenation of kustomize bases. `external-dns` and `opensearch` are properly deleted now and no components are left behind.
+- [[#310](https://github.com/sighupio/fury-distribution/pull/310)] Migrations: fix an error while migrating from auth type `none` to `sso` related to old ingresses not being deleted first.
+
+## Upgrade procedure
+
+Check the [upgrade docs](https://docs.kubernetesfury.com/docs/upgrades/upgrades) for the detailed procedure.
diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md
index 387244539..e4a0eed68 100644
--- a/docs/schemas/ekscluster-kfd-v1alpha2.md
+++ b/docs/schemas/ekscluster-kfd-v1alpha2.md
@@ -2,8 +2,14 @@
This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service.
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+```bash
+furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.
## Properties
| Property | Type | Required |
@@ -15,7 +21,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
### Description
-A Fury Cluster deployed through AWS's Elastic Kubernetes Service
+A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).
## .apiVersion
@@ -33,7 +39,7 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------|
@@ -49,6 +55,10 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service
## .metadata.name
+### Description
+
+The name of the cluster. It will also be used as a prefix for all the other resources created.
+
### Constraints
**maximum length**: the maximum number of characters for this string is: `56`
@@ -92,11 +102,15 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service
| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional |
| [tolerations](#specdistributioncommontolerations) | `array` | Optional |
+### Description
+
+Common configuration for all the distribution modules.
+
## .spec.distribution.common.nodeSelector
### Description
-The node selector to use to place the pods for all the KFD modules
+The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
## .spec.distribution.common.provider
@@ -110,21 +124,21 @@ The node selector to use to place the pods for all the KFD modules
### Description
-The type of the provider, must be EKS if specified
+The provider type. Don't set. FOR INTERNAL USE ONLY.
## .spec.distribution.common.registry
### Description
-URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).
+URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).
-NOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too.
+NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too.
## .spec.distribution.common.relativeVendorPath
### Description
-The relative path to the vendor directory, does not need to be changed
+The relative path to the vendor directory, does not need to be changed.
## .spec.distribution.common.tolerations
@@ -139,13 +153,19 @@ The relative path to the vendor directory, does not need to be changed
### Description
-The tolerations that will be added to the pods for all the KFD modules
+An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:
+
+```yaml
+- effect: NoSchedule
+ key: node.kubernetes.io/role
+ value: infra
+```
## .spec.distribution.common.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -163,7 +183,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -210,7 +230,7 @@ The behavior of the configmap
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -418,7 +438,7 @@ The behavior of the secret
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -525,11 +545,15 @@ The type of the secret
| [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional |
| [provider](#specdistributionmodulesauthprovider) | `object` | Required |
+### Description
+
+Configuration for the Auth module.
+
## .spec.distribution.modules.auth.baseDomain
### Description
-The base domain for the auth module
+The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class.
## .spec.distribution.modules.auth.dex
@@ -542,17 +566,32 @@ The base domain for the auth module
| [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional |
| [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional |
+### Description
+
+Configuration for the Dex package.
+
## .spec.distribution.modules.auth.dex.additionalStaticClients
### Description
-The additional static clients for dex
+Additional static client definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:
+
+```yaml
+additionalStaticClients:
+ - id: my-custom-client
+ name: "A custom additional static client"
+ redirectURIs:
+ - "https://myapp.tld/redirect"
+ - "https://alias.tld/oidc-callback"
+ secret: supersecretpassword
+```
+Reference: https://dexidp.io/docs/connectors/local/
## .spec.distribution.modules.auth.dex.connectors
### Description
-The connectors for dex
+A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/
## .spec.distribution.modules.auth.dex.expiry
@@ -588,7 +627,7 @@ Dex signing key expiration time duration (default 6h).
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.auth.dex.overrides.tolerations
@@ -603,13 +642,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.auth.dex.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -627,7 +666,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -650,13 +689,21 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the Auth module.
+
## .spec.distribution.modules.auth.overrides.ingresses
+### Description
+
+Override the definition of the Auth module ingresses.
+
## .spec.distribution.modules.auth.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the auth module
+Set to override the node selector used to place the pods of the Auth module.
## .spec.distribution.modules.auth.overrides.tolerations
@@ -671,13 +718,13 @@ The node selector to use to place the pods for the auth module
### Description
-The tolerations that will be added to the pods for the auth module
+Set to override the tolerations that will be added to the pods of the Auth module.
## .spec.distribution.modules.auth.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -695,7 +742,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -791,7 +838,7 @@ override default routes for KFD components
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -805,7 +852,7 @@ override default routes for KFD components
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -895,27 +942,36 @@ cat ec_private.pem | base64
| [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required |
| [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required |
+### Description
+
+Configuration for the HTTP Basic Auth provider.
+
## .spec.distribution.modules.auth.provider.basicAuth.password
### Description
-The password for the basic auth
+The password for logging in with the HTTP basic authentication.
## .spec.distribution.modules.auth.provider.basicAuth.username
### Description
-The username for the basic auth
+The username for logging in with the HTTP basic authentication.
## .spec.distribution.modules.auth.provider.type
### Description
-The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
+The type of the Auth provider, options are:
+- `none`: will disable authentication in the infrastructural ingresses.
+- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.
+- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.
+
+Default is `none`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------|
@@ -969,7 +1025,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
### Description
-The node selector to use to place the pods for the load balancer controller module
+The node selector to use to place the pods for the load balancer controller module.
## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations
@@ -984,13 +1040,13 @@ The node selector to use to place the pods for the load balancer controller modu
### Description
-The tolerations that will be added to the pods for the cluster autoscaler module
+The tolerations that will be added to the pods for the cluster autoscaler module.
## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1008,7 +1064,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1055,7 +1111,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the load balancer controller module
+The node selector to use to place the pods for the load balancer controller module.
## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations
@@ -1070,13 +1126,13 @@ The node selector to use to place the pods for the load balancer controller modu
### Description
-The tolerations that will be added to the pods for the cluster autoscaler module
+The tolerations that will be added to the pods for the cluster autoscaler module.
## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1094,7 +1150,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1128,7 +1184,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations
@@ -1143,13 +1199,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1167,7 +1223,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1214,7 +1270,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the load balancer controller module
+The node selector to use to place the pods for the load balancer controller module.
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations
@@ -1229,13 +1285,13 @@ The node selector to use to place the pods for the load balancer controller modu
### Description
-The tolerations that will be added to the pods for the cluster autoscaler module
+The tolerations that will be added to the pods for the cluster autoscaler module.
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1253,7 +1309,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1276,13 +1332,17 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesawsoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesawsoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.aws.overrides.ingresses
## .spec.distribution.modules.aws.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the dr module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.aws.overrides.tolerations
@@ -1297,13 +1357,13 @@ The node selector to use to place the pods for the dr module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.aws.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1321,7 +1381,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1344,6 +1404,10 @@ The value of the toleration
| [type](#specdistributionmodulesdrtype) | `string` | Required |
| [velero](#specdistributionmodulesdrvelero) | `object` | Optional |
+### Description
+
+Configuration for the Disaster Recovery module.
+
## .spec.distribution.modules.dr.overrides
### Properties
@@ -1354,13 +1418,17 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.dr.overrides.ingresses
## .spec.distribution.modules.dr.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the dr module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.dr.overrides.tolerations
@@ -1375,13 +1443,13 @@ The node selector to use to place the pods for the dr module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.dr.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1399,7 +1467,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1416,11 +1484,13 @@ The value of the toleration
### Description
-The type of the DR, must be ***none*** or ***eks***
+The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.
+
+Default is `none`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------|
@@ -1450,17 +1520,17 @@ The type of the DR, must be ***none*** or ***eks***
### Description
-The name of the velero bucket
+The name of the bucket for Velero.
## .spec.distribution.modules.dr.velero.eks.region
### Description
-The region where the velero bucket is located
+The region where the bucket for Velero will be located.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-----------------|
@@ -1507,7 +1577,7 @@ The region where the velero bucket is located
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.dr.velero.overrides.tolerations
@@ -1522,13 +1592,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.dr.velero.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1546,7 +1616,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1563,53 +1633,91 @@ The value of the toleration
### Properties
-| Property | Type | Required |
-|:------------------------------------------------------------|:----------|:---------|
-| [cron](#specdistributionmodulesdrveleroschedulescron) | `object` | Optional |
-| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
-| [ttl](#specdistributionmodulesdrveleroschedulesttl) | `string` | Optional |
+| Property | Type | Required |
+|:--------------------------------------------------------------------|:----------|:---------|
+| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional |
+| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
### Description
Configuration for Velero's backup schedules.
-## .spec.distribution.modules.dr.velero.schedules.cron
+## .spec.distribution.modules.dr.velero.schedules.definitions
### Properties
-| Property | Type | Required |
-|:--------------------------------------------------------------------|:---------|:---------|
-| [full](#specdistributionmodulesdrveleroschedulescronfull) | `string` | Optional |
-| [manifests](#specdistributionmodulesdrveleroschedulescronmanifests) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------|:---------|:---------|
+| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional |
+| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional |
+
+### Description
+
+Configuration for Velero schedules.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------------------|:----------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional |
+| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional |
### Description
-Configuration for Velero's schedules cron.
+Configuration for Velero's full backup schedule.
-## .spec.distribution.modules.dr.velero.schedules.cron.full
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule
### Description
The cron expression for the `full` backup schedule (default `0 1 * * *`).
-## .spec.distribution.modules.dr.velero.schedules.cron.manifests
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData
### Description
-The cron expression for the `manifests` backup schedule (default `*/15 * * * *`).
+EXPERIMENTAL (if you do more than one backup, the backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation.
-## .spec.distribution.modules.dr.velero.schedules.install
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl
### Description
-Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`.
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests
+
+### Properties
+
+| Property | Type | Required |
+|:----------------------------------------------------------------------------------|:---------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's manifests backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule
+
+### Description
+
+The cron expression for the `manifests` backup schedule (default `*/15 * * * *`).
-## .spec.distribution.modules.dr.velero.schedules.ttl
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl
### Description
The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+## .spec.distribution.modules.dr.velero.schedules.install
+
+### Description
+
+Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`.
+
## .spec.distribution.modules.ingress
### Properties
@@ -1627,7 +1735,7 @@ The Time To Live (TTL) of the backups created by the backup schedules (default `
### Description
-the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone
+The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone.
## .spec.distribution.modules.ingress.certManager
@@ -1638,6 +1746,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati
| [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required |
| [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional |
+### Description
+
+Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer
### Properties
@@ -1649,33 +1761,37 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati
| [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional |
| [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional |
+### Description
+
+Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer.email
### Description
-The email of the cluster issuer
+The email address to use during the certificate issuing process.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.name
### Description
-The name of the cluster issuer
+The name of the clusterIssuer.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers
### Description
-The custom solvers configurations
+The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.type
### Description
-The type of the cluster issuer, must be ***dns01*** or ***http01***
+The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1695,7 +1811,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01***
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.certManager.overrides.tolerations
@@ -1710,13 +1826,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1734,7 +1850,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1757,6 +1873,10 @@ The value of the toleration
| [private](#specdistributionmodulesingressdnsprivate) | `object` | Optional |
| [public](#specdistributionmodulesingressdnspublic) | `object` | Optional |
+### Description
+
+DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.
+
## .spec.distribution.modules.ingress.dns.overrides
### Properties
@@ -1770,7 +1890,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.dns.overrides.tolerations
@@ -1785,13 +1905,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.dns.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1809,7 +1929,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1831,17 +1951,21 @@ The value of the toleration
| [create](#specdistributionmodulesingressdnsprivatecreate) | `boolean` | Required |
| [name](#specdistributionmodulesingressdnsprivatename) | `string` | Required |
+### Description
+
+The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.
+
## .spec.distribution.modules.ingress.dns.private.create
### Description
-If true, the private hosted zone will be created
+By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead.
## .spec.distribution.modules.ingress.dns.private.name
### Description
-The name of the private hosted zone
+The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`.
## .spec.distribution.modules.ingress.dns.public
@@ -1856,13 +1980,13 @@ The name of the private hosted zone
### Description
-If true, the public hosted zone will be created
+By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead.
## .spec.distribution.modules.ingress.dns.public.name
### Description
-The name of the public hosted zone
+The name of the public hosted zone.
## .spec.distribution.modules.ingress.forecastle
@@ -1885,7 +2009,7 @@ The name of the public hosted zone
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations
@@ -1900,13 +2024,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1924,7 +2048,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1949,7 +2073,7 @@ The value of the toleration
### Description
-Configurations for the nginx ingress controller module
+Configurations for the Ingress nginx controller package.
## .spec.distribution.modules.ingress.nginx.overrides
@@ -1964,7 +2088,7 @@ Configurations for the nginx ingress controller module
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.nginx.overrides.tolerations
@@ -1979,13 +2103,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2003,7 +2127,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2029,11 +2153,11 @@ The value of the toleration
### Description
-The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***
+The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------------|
@@ -2051,25 +2175,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or **
| [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required |
| [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required |
+### Description
+
+Kubernetes TLS secret for the ingresses TLS certificate.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.ca
+### Description
+
+The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.cert
### Description
-The certificate file content or you can use the file notation to get the content from a file
+The certificate file's content. You can use the `"{file://}"` notation to get the content from a file.
## .spec.distribution.modules.ingress.nginx.tls.secret.key
+### Description
+
+The signing key file's content. You can use the `"{file://}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.type
### Description
-The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***
+The type of the Ingress nginx controller, options are:
+- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.
+- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.
+- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.
+
+Default is `single`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2087,6 +2228,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **
| [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the Ingress module.
+
## .spec.distribution.modules.ingress.overrides.ingresses
### Properties
@@ -2109,25 +2254,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **
### Description
-If true, the ingress will not have authentication
+If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth.
## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host
### Description
-The host of the ingress
+Use this host for the ingress instead of the default one.
## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass
### Description
-The ingress class of the ingress
+Use this ingress class for the ingress instead of the default one.
## .spec.distribution.modules.ingress.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the ingress module
+Set to override the node selector used to place the pods of the Ingress module.
## .spec.distribution.modules.ingress.overrides.tolerations
@@ -2142,13 +2287,13 @@ The node selector to use to place the pods for the ingress module
### Description
-The tolerations that will be added to the pods for the ingress module
+Set to override the tolerations that will be added to the pods of the Ingress module.
## .spec.distribution.modules.ingress.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2166,7 +2311,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2194,6 +2339,10 @@ The value of the toleration
| [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional |
| [type](#specdistributionmodulesloggingtype) | `string` | Required |
+### Description
+
+Configuration for the Logging module.
+
## .spec.distribution.modules.logging.cerebro
### Properties
@@ -2202,6 +2351,10 @@ The value of the toleration
|:-------------------------------------------------------------|:---------|:---------|
| [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional |
+### Description
+
+DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
+
## .spec.distribution.modules.logging.cerebro.overrides
### Properties
@@ -2215,7 +2368,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.cerebro.overrides.tolerations
@@ -2230,13 +2383,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2254,7 +2407,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2284,55 +2437,55 @@ The value of the toleration
### Description
-when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.
+When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.
## .spec.distribution.modules.logging.customOutputs.audit
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.errors
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.events
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.infra
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.ingressNginx
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.kubernetes
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.systemdCommon
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.systemdEtcd
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.loki
@@ -2343,12 +2496,21 @@ This value defines where the output from Flow will be sent. Will be the `spec` s
| [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional |
| [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional |
| [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional |
+| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required |
+
+### Description
+
+Configuration for the Loki package.
## .spec.distribution.modules.logging.loki.backend
+### Description
+
+The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2367,35 +2529,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s
| [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional |
+### Description
+
+Configuration for Loki's external storage backend.
+
## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId
### Description
-The access key id of the loki external endpoint
+The access key ID (username) for the external S3-compatible bucket.
## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName
### Description
-The bucket name of the loki external endpoint
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint
### Description
-The endpoint of the loki external endpoint
+External S3-compatible endpoint for Loki's storage.
## .spec.distribution.modules.logging.loki.externalEndpoint.insecure
### Description
-If true, the loki external endpoint will be insecure
+If true, will use HTTP as protocol instead of HTTPS.
## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey
### Description
-The secret access key of the loki external endpoint
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.logging.loki.resources
@@ -2419,13 +2585,13 @@ The secret access key of the loki external endpoint
### Description
-The cpu limit for the opensearch pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.logging.loki.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.logging.loki.resources.requests
@@ -2440,13 +2606,23 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.logging.loki.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
+
+## .spec.distribution.modules.logging.loki.tsdbStartDate
+
+### Description
+
+Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.
+
+The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.
+
+Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`.
## .spec.distribution.modules.logging.minio
@@ -2458,6 +2634,10 @@ The memory request for the opensearch pods
| [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Logging's MinIO deployment.
+
## .spec.distribution.modules.logging.minio.overrides
### Properties
@@ -2471,7 +2651,7 @@ The memory request for the opensearch pods
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.minio.overrides.tolerations
@@ -2486,13 +2666,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.minio.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2510,7 +2690,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2536,19 +2716,19 @@ The value of the toleration
### Description
-The password of the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.logging.minio.rootUser.username
### Description
-The username of the minio root user
+The username for the default MinIO root user.
## .spec.distribution.modules.logging.minio.storageSize
### Description
-The PVC size for each minio disk, 6 disks total
+The PVC size for each MinIO disk, 6 disks total.
## .spec.distribution.modules.logging.opensearch
@@ -2574,7 +2754,7 @@ The PVC size for each minio disk, 6 disks total
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.opensearch.overrides.tolerations
@@ -2589,13 +2769,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2613,7 +2793,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2648,13 +2828,13 @@ The value of the toleration
### Description
-The cpu limit for the opensearch pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.logging.opensearch.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.logging.opensearch.resources.requests
@@ -2669,29 +2849,29 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.logging.opensearch.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.logging.opensearch.storageSize
### Description
-The storage size for the opensearch pods
+The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`.
## .spec.distribution.modules.logging.opensearch.type
### Description
-The type of the opensearch, must be ***single*** or ***triple***
+The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2706,6 +2886,10 @@ The type of the opensearch, must be ***single*** or ***triple***
|:--------------------------------------------------------------|:---------|:---------|
| [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional |
+### Description
+
+Configuration for the Logging Operator.
+
## .spec.distribution.modules.logging.operator.overrides
### Properties
@@ -2719,7 +2903,7 @@ The type of the opensearch, must be ***single*** or ***triple***
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.operator.overrides.tolerations
@@ -2734,13 +2918,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.operator.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2758,7 +2942,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2781,13 +2965,17 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.logging.overrides.ingresses
## .spec.distribution.modules.logging.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the dr module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.logging.overrides.tolerations
@@ -2802,13 +2990,13 @@ The node selector to use to place the pods for the dr module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.logging.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2826,7 +3014,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2843,11 +3031,17 @@ The value of the toleration
### Description
-selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------------|
@@ -2876,7 +3070,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C
### Description
-configuration for the Monitoring module components
+Configuration for the Monitoring module.
## .spec.distribution.modules.monitoring.alertmanager
@@ -2892,19 +3086,19 @@ configuration for the Monitoring module components
### Description
-The webhook url to send deadman switch monitoring, for example to use with healthchecks.io
+The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io.
## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules
### Description
-If true, the default rules will be installed
+Set to false to avoid installing the Prometheus rules (alerts) included with the distribution.
## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl
### Description
-The slack webhook url to send alerts
+The Slack webhook URL where to send the infrastructural and workload alerts to.
## .spec.distribution.modules.monitoring.blackboxExporter
@@ -2927,7 +3121,7 @@ The slack webhook url to send alerts
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations
@@ -2942,13 +3136,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2966,7 +3160,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3010,7 +3204,7 @@ Notice that by default anonymous access is enabled.
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations
@@ -3025,13 +3219,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3049,7 +3243,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3095,7 +3289,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations
@@ -3110,13 +3304,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3134,7 +3328,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3158,15 +3352,19 @@ The value of the toleration
| [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional |
| [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional |
+### Description
+
+Configuration for the Mimir package.
+
## .spec.distribution.modules.monitoring.mimir.backend
### Description
-The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3185,35 +3383,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
| [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional |
+### Description
+
+Configuration for Mimir's external storage backend.
+
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId
### Description
-The access key id of the external mimir backend
+The access key ID (username) for the external S3-compatible bucket.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName
### Description
-The bucket name of the external mimir backend
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint
### Description
-The endpoint of the external mimir backend
+The external S3-compatible endpoint for Mimir's storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure
### Description
-If true, the external mimir backend will not use tls
+If true, will use HTTP as protocol instead of HTTPS.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey
### Description
-The secret access key of the external mimir backend
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.monitoring.mimir.overrides
@@ -3228,7 +3430,7 @@ The secret access key of the external mimir backend
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations
@@ -3243,13 +3445,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3267,7 +3469,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3284,7 +3486,7 @@ The value of the toleration
### Description
-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.
## .spec.distribution.modules.monitoring.minio
@@ -3296,6 +3498,10 @@ The retention time for the mimir pods
| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Monitoring's MinIO deployment.
+
## .spec.distribution.modules.monitoring.minio.overrides
### Properties
@@ -3309,7 +3515,7 @@ The retention time for the mimir pods
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.minio.overrides.tolerations
@@ -3324,13 +3530,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3348,7 +3554,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3374,19 +3580,19 @@ The value of the toleration
### Description
-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.rootUser.username
### Description
-The username for the minio root user
+The username for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.storageSize
### Description
-The storage size for the minio pods
+The PVC size for each MinIO disk, 6 disks total.
## .spec.distribution.modules.monitoring.overrides
@@ -3398,13 +3604,17 @@ The storage size for the minio pods
| [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.monitoring.overrides.ingresses
## .spec.distribution.modules.monitoring.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the dr module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.monitoring.overrides.tolerations
@@ -3419,13 +3629,13 @@ The node selector to use to place the pods for the dr module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.monitoring.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3443,7 +3653,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3498,13 +3708,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.
### Description
-The cpu limit for the opensearch pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.monitoring.prometheus.resources.requests
@@ -3519,31 +3729,31 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.monitoring.prometheus.retentionSize
### Description
-The retention size for the k8s Prometheus instance.
+The retention size for the `k8s` Prometheus instance.
## .spec.distribution.modules.monitoring.prometheus.retentionTime
### Description
-The retention time for the k8s Prometheus instance.
+The retention time for the `k8s` Prometheus instance.
## .spec.distribution.modules.monitoring.prometheus.storageSize
### Description
-The storage size for the k8s Prometheus instance.
+The storage size for the `k8s` Prometheus instance.
## .spec.distribution.modules.monitoring.prometheusAgent
@@ -3584,13 +3794,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.
### Description
-The cpu limit for the opensearch pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests
@@ -3605,28 +3815,30 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.monitoring.type
### Description
-The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.
+The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.
- `none`: will disable the whole monitoring stack.
- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
-- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+
+Default is `prometheus`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------------|
@@ -3656,7 +3868,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations
@@ -3671,13 +3883,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3695,7 +3907,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3717,20 +3929,31 @@ The value of the toleration
| [overrides](#specdistributionmodulesnetworkingoverrides) | `object` | Optional |
| [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional |
+### Description
+
+Configuration for the Networking module.
+
## .spec.distribution.modules.networking.overrides
### Properties
| Property | Type | Required |
|:------------------------------------------------------------------------|:---------|:---------|
+| [ingresses](#specdistributionmodulesnetworkingoverridesingresses) | `object` | Optional |
| [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
+## .spec.distribution.modules.networking.overrides.ingresses
+
## .spec.distribution.modules.networking.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.networking.overrides.tolerations
@@ -3745,13 +3968,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.networking.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3769,7 +3992,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3803,7 +4026,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations
@@ -3818,13 +4041,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3842,7 +4065,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3866,6 +4089,10 @@ The value of the toleration
| [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional |
| [type](#specdistributionmodulespolicytype) | `string` | Required |
+### Description
+
+Configuration for the Policy module.
+
## .spec.distribution.modules.policy.gatekeeper
### Properties
@@ -3877,6 +4104,10 @@ The value of the toleration
| [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required |
| [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional |
+### Description
+
+Configuration for the Gatekeeper package.
+
## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces
### Description
@@ -3887,11 +4118,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en
### Description
-The enforcement action to use for the gatekeeper module
+The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3903,7 +4134,7 @@ The enforcement action to use for the gatekeeper module
### Description
-If true, the default policies will be installed
+Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution.
## .spec.distribution.modules.policy.gatekeeper.overrides
@@ -3918,7 +4149,7 @@ If true, the default policies will be installed
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations
@@ -3933,13 +4164,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3957,7 +4188,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3981,17 +4212,21 @@ The value of the toleration
| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |
+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces
### Description
-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.
## .spec.distribution.modules.policy.kyverno.installDefaultPolicies
### Description
-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.
## .spec.distribution.modules.policy.kyverno.overrides
@@ -4006,7 +4241,7 @@ If true, the default policies will be installed
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations
@@ -4021,13 +4256,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4045,7 +4280,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4062,11 +4297,11 @@ The value of the toleration
### Description
-The validation failure action to use for the kyverno module
+The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -4083,13 +4318,17 @@ The validation failure action to use for the kyverno module
| [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.policy.overrides.ingresses
## .spec.distribution.modules.policy.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the dr module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.policy.overrides.tolerations
@@ -4104,13 +4343,13 @@ The node selector to use to place the pods for the dr module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.policy.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4128,7 +4367,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4145,11 +4384,13 @@ The value of the toleration
### Description
-The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***
+The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.
+
+Default is `none`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------|
@@ -4168,6 +4409,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno**
| [tempo](#specdistributionmodulestracingtempo) | `object` | Optional |
| [type](#specdistributionmodulestracingtype) | `string` | Required |
+### Description
+
+Configuration for the Tracing module.
+
## .spec.distribution.modules.tracing.minio
### Properties
@@ -4178,6 +4423,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno**
| [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Tracing's MinIO deployment.
+
## .spec.distribution.modules.tracing.minio.overrides
### Properties
@@ -4191,7 +4440,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno**
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.tracing.minio.overrides.tolerations
@@ -4206,13 +4455,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4230,7 +4479,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4256,19 +4505,19 @@ The value of the toleration
### Description
-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.tracing.minio.rootUser.username
### Description
-The username for the minio root user
+The username for the default MinIO root user.
## .spec.distribution.modules.tracing.minio.storageSize
### Description
-The storage size for the minio pods
+The PVC size for each MinIO disk, 6 disks total.
## .spec.distribution.modules.tracing.overrides
@@ -4280,13 +4529,17 @@ The storage size for the minio pods
| [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.tracing.overrides.ingresses
## .spec.distribution.modules.tracing.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the dr module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.tracing.overrides.tolerations
@@ -4301,13 +4554,13 @@ The node selector to use to place the pods for the dr module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.tracing.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4325,7 +4578,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4349,15 +4602,19 @@ The value of the toleration
| [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional |
| [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional |
+### Description
+
+Configuration for the Tempo package.
+
## .spec.distribution.modules.tracing.tempo.backend
### Description
-The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4376,35 +4633,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
| [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional |
+### Description
+
+Configuration for Tempo's external storage backend.
+
## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId
### Description
-The access key id of the external tempo backend
+The access key ID (username) for the external S3-compatible bucket.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName
### Description
-The bucket name of the external tempo backend
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint
### Description
-The endpoint of the external tempo backend
+The external S3-compatible endpoint for Tempo's storage.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure
### Description
-If true, the external tempo backend will not use tls
+If true, will use HTTP as the protocol instead of HTTPS.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey
### Description
-The secret access key of the external tempo backend
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.tracing.tempo.overrides
@@ -4419,7 +4680,7 @@ The secret access key of the external tempo backend
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.tracing.tempo.overrides.tolerations
@@ -4434,13 +4695,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4458,7 +4719,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4475,17 +4736,19 @@ The value of the toleration
### Description
-The retention time for the tempo pods
+The retention time for the traces stored in Tempo.
## .spec.distribution.modules.tracing.type
### Description
-The type of tracing to use, either ***none*** or ***tempo***
+The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.
+
+Default is `tempo`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------|
@@ -4494,6 +4757,10 @@ The type of tracing to use, either ***none*** or ***tempo***
## .spec.distributionVersion
+### Description
+
+Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.
+
### Constraints
**minimum length**: the minimum number of characters for this string is: `1`
@@ -4517,7 +4784,7 @@ The type of tracing to use, either ***none*** or ***tempo***
### Description
-This key defines the VPC that will be created in AWS
+Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.
## .spec.infrastructure.vpc.network
@@ -4532,7 +4799,7 @@ This key defines the VPC that will be created in AWS
### Description
-This is the CIDR of the VPC that will be created
+The network CIDR for the VPC that will be created
### Constraints
@@ -4553,11 +4820,15 @@ This is the CIDR of the VPC that will be created
| [private](#specinfrastructurevpcnetworksubnetscidrsprivate) | `array` | Required |
| [public](#specinfrastructurevpcnetworksubnetscidrspublic) | `array` | Required |
+### Description
+
+Network CIDRS configuration for private and public subnets.
+
## .spec.infrastructure.vpc.network.subnetsCidrs.private
### Description
-These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created
+The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created
### Constraints
@@ -4573,7 +4844,7 @@ These are the CIRDs for the private subnets, where the nodes, the pods, and the
### Description
-These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created
+The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created
### Constraints
@@ -4605,31 +4876,31 @@ These are the CIDRs for the public subnets, where the public load balancers and
### Description
-This section defines the creation of VPN bastions
+Configuration for the VPN server instances.
## .spec.infrastructure.vpn.bucketNamePrefix
### Description
-This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states
+This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users).
## .spec.infrastructure.vpn.dhParamsBits
### Description
-The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file
+The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file.
## .spec.infrastructure.vpn.diskSize
### Description
-The size of the disk in GB
+The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB.
## .spec.infrastructure.vpn.iamUserNameOverride
### Description
-Overrides the default IAM user name for the VPN
+Overrides the IAM user name for the VPN. Default is to use the cluster name.
### Constraints
@@ -4645,25 +4916,25 @@ Overrides the default IAM user name for the VPN
### Description
-The size of the AWS EC2 instance
+The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`.
## .spec.infrastructure.vpn.instances
### Description
-The number of instances to create, 0 to skip the creation
+The number of VPN server instances to create, `0` to skip the creation.
## .spec.infrastructure.vpn.operatorName
### Description
-The username of the account to create in the bastion's operating system
+The username of the account to create in the bastion's operating system.
## .spec.infrastructure.vpn.port
### Description
-The port used by the OpenVPN server
+The port where each OpenVPN server will listen for connections.
## .spec.infrastructure.vpn.ssh
@@ -4679,7 +4950,7 @@ The port used by the OpenVPN server
### Description
-The CIDR enabled in the security group that can access the bastions in SSH
+The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
### Constraints
@@ -4695,7 +4966,7 @@ The CIDR enabled in the security group that can access the bastions in SSH
### Description
-The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user
+List of GitHub usernames from whom to get the SSH public keys that will be added as authorized keys for the `operatorName` user.
### Constraints
@@ -4705,13 +4976,13 @@ The github user name list that will be used to get the ssh public key that will
### Description
-This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented
+**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system.
## .spec.infrastructure.vpn.vpcId
### Description
-The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted
+The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted.
### Constraints
@@ -4727,7 +4998,7 @@ The VPC ID where the VPN servers will be created, required only if .spec.infrast
### Description
-The CIDR that will be used to assign IP addresses to the VPN clients when connected
+The network CIDR that will be used to assign IP addresses to the VPN clients when connected.
### Constraints
@@ -4751,6 +5022,7 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec
| [logRetentionDays](#speckuberneteslogretentiondays) | `integer` | Optional |
| [logsTypes](#speckuberneteslogstypes) | `array` | Optional |
| [nodeAllowedSshPublicKey](#speckubernetesnodeallowedsshpublickey) | `object` | Required |
+| [nodePoolGlobalAmiType](#speckubernetesnodepoolglobalamitype) | `string` | Required |
| [nodePools](#speckubernetesnodepools) | `array` | Required |
| [nodePoolsLaunchKind](#speckubernetesnodepoolslaunchkind) | `string` | Required |
| [serviceIpV4Cidr](#speckubernetesserviceipv4cidr) | `string` | Optional |
@@ -4758,6 +5030,10 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec
| [vpcId](#speckubernetesvpcid) | `string` | Optional |
| [workersIAMRoleNamePrefixOverride](#speckubernetesworkersiamrolenameprefixoverride) | `string` | Optional |
+### Description
+
+Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.
+
## .spec.kubernetes.apiServer
### Properties
@@ -4773,13 +5049,13 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec
### Description
-This value defines if the API server will be accessible only from the private subnets
+This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`.
## .spec.kubernetes.apiServer.privateAccessCidrs
### Description
-This value defines the CIDRs that will be allowed to access the API server from the private subnets
+The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server.
### Constraints
@@ -4795,13 +5071,13 @@ This value defines the CIDRs that will be allowed to access the API server from
### Description
-This value defines if the API server will be accessible from the public subnets
+This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`.
## .spec.kubernetes.apiServer.publicAccessCidrs
### Description
-This value defines the CIDRs that will be allowed to access the API server from the public subnets
+The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server.
### Constraints
@@ -4823,11 +5099,17 @@ This value defines the CIDRs that will be allowed to access the API server from
| [roles](#speckubernetesawsauthroles) | `array` | Optional |
| [users](#speckubernetesawsauthusers) | `array` | Optional |
+### Description
+
+Optional additional security configuration for EKS IAM via the `aws-auth` configmap.
+
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
+
## .spec.kubernetes.awsAuth.additionalAccounts
### Description
-This optional array defines additional AWS accounts that will be added to the aws-auth configmap
+This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap.
## .spec.kubernetes.awsAuth.roles
@@ -4841,7 +5123,7 @@ This optional array defines additional AWS accounts that will be added to the aw
### Description
-This optional array defines additional IAM roles that will be added to the aws-auth configmap
+This optional array defines additional IAM roles that will be added to the `aws-auth` configmap.
## .spec.kubernetes.awsAuth.roles.groups
@@ -4871,7 +5153,7 @@ This optional array defines additional IAM roles that will be added to the aws-a
### Description
-This optional array defines additional IAM users that will be added to the aws-auth configmap
+This optional array defines additional IAM users that will be added to the `aws-auth` configmap.
## .spec.kubernetes.awsAuth.users.groups
@@ -4893,7 +5175,7 @@ This optional array defines additional IAM users that will be added to the aws-a
### Description
-Overrides the default IAM role name prefix for the EKS cluster
+Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name.
### Constraints
@@ -4909,7 +5191,37 @@ Overrides the default IAM role name prefix for the EKS cluster
### Description
-Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
+Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following integer values:
+
+| Value |
+|:----|
+|0 |
+|1 |
+|3 |
+|5 |
+|7 |
+|14 |
+|30 |
+|60 |
+|90 |
+|120 |
+|150 |
+|180 |
+|365 |
+|400 |
+|545 |
+|731 |
+|1096|
+|1827|
+|2192|
+|2557|
+|2922|
+|3288|
+|3653|
## .spec.kubernetes.logsTypes
@@ -4919,7 +5231,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------------------|
@@ -4933,7 +5245,22 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
### Description
-This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user
+The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
+
+## .spec.kubernetes.nodePoolGlobalAmiType
+
+### Description
+
+Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool.
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following string values:
+
+| Value |
+|:-------------|
+|`"alinux2"` |
+|`"alinux2023"`|
## .spec.kubernetes.nodePools
@@ -4952,7 +5279,11 @@ This key contains the ssh public key that can connect to the nodes via SSH using
| [subnetIds](#speckubernetesnodepoolssubnetids) | `array` | Optional |
| [tags](#speckubernetesnodepoolstags) | `object` | Optional |
| [taints](#speckubernetesnodepoolstaints) | `array` | Optional |
-| [type](#speckubernetesnodepoolstype) | `string` | Optional |
+| [type](#speckubernetesnodepoolstype) | `string` | Required |
+
+### Description
+
+Array with all the node pool definitions that will join the cluster. Each item is an object.
## .spec.kubernetes.nodePools.additionalFirewallRules
@@ -4964,6 +5295,10 @@ This key contains the ssh public key that can connect to the nodes via SSH using
| [self](#speckubernetesnodepoolsadditionalfirewallrulesself) | `array` | Optional |
| [sourceSecurityGroupId](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupid) | `array` | Optional |
+### Description
+
+Optional additional firewall rules that will be attached to the nodes.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks
### Properties
@@ -4979,10 +5314,12 @@ This key contains the ssh public key that can connect to the nodes via SSH using
### Description
-The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored.
+The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details.
### Constraints
+**maximum number of items**: the maximum number of items for this array is: `1`
+
**minimum number of items**: the minimum number of items for this array is: `1`
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.cidrBlocks
@@ -5010,6 +5347,10 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b
| [from](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsfrom) | `integer` | Required |
| [to](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsto) | `integer` | Required |
+### Description
+
+Port range for the Firewall Rule.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.from
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.to
@@ -5028,11 +5369,19 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.tags
+### Description
+
+Additional AWS tags for the Firewall rule.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.type
+### Description
+
+The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -5060,7 +5409,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b
### Description
-The name of the FW rule
+The name of the Firewall rule.
## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports
@@ -5071,6 +5420,10 @@ The name of the FW rule
| [from](#speckubernetesnodepoolsadditionalfirewallrulesselfportsfrom) | `integer` | Required |
| [to](#speckubernetesnodepoolsadditionalfirewallrulesselfportsto) | `integer` | Required |
+### Description
+
+Port range for the Firewall Rule.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.from
## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.to
@@ -5079,7 +5432,7 @@ The name of the FW rule
### Description
-The protocol of the FW rule
+The protocol of the Firewall rule.
### Constraints
@@ -5095,23 +5448,23 @@ The protocol of the FW rule
### Description
-If true, the source will be the security group itself
+If `true`, the source will be the security group itself.
## .spec.kubernetes.nodePools.additionalFirewallRules.self.tags
### Description
-The tags of the FW rule
+Additional AWS tags for the Firewall rule.
## .spec.kubernetes.nodePools.additionalFirewallRules.self.type
### Description
-The type of the FW rule can be ingress or egress
+The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -5139,7 +5492,7 @@ The type of the FW rule can be ingress or egress
### Description
-The name of the FW rule
+The name for the additional Firewall rule Security Group.
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports
@@ -5150,6 +5503,10 @@ The name of the FW rule
| [from](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsfrom) | `integer` | Required |
| [to](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsto) | `integer` | Required |
+### Description
+
+Port range for the Firewall Rule.
+
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.from
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.to
@@ -5158,7 +5515,7 @@ The name of the FW rule
### Description
-The protocol of the FW rule
+The protocol of the Firewall rule.
### Constraints
@@ -5174,23 +5531,23 @@ The protocol of the FW rule
### Description
-The source security group ID
+The source security group ID.
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.tags
### Description
-The tags of the FW rule
+Additional AWS tags for the Firewall rule.
## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.type
### Description
-The type of the FW rule can be ingress or egress
+The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -5203,26 +5560,48 @@ The type of the FW rule can be ingress or egress
| Property | Type | Required |
|:------------------------------------------|:---------|:---------|
-| [id](#speckubernetesnodepoolsamiid) | `string` | Required |
-| [owner](#speckubernetesnodepoolsamiowner) | `string` | Required |
+| [id](#speckubernetesnodepoolsamiid) | `string` | Optional |
+| [owner](#speckubernetesnodepoolsamiowner) | `string` | Optional |
+| [type](#speckubernetesnodepoolsamitype) | `string` | Optional |
+
+### Description
+
+Configuration for customizing the Amazon Machine Image (AMI) for the machines of the Node Pool.
+
+The AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.
## .spec.kubernetes.nodePools.ami.id
### Description
-The AMI ID to use for the nodes
+The ID of the AMI to use for the nodes, must be set together with the `owner` field. `ami.id` and `ami.owner` can only be set when Node Pool type is `self-managed` and they can't be set at the same time as `ami.type`.
## .spec.kubernetes.nodePools.ami.owner
### Description
-The owner of the AMI
+The owner of the AMI to use for the nodes, must be set together with the `id` field. `ami.id` and `ami.owner` can only be set when Node Pool type is `self-managed` and they can't be set at the same time as `ami.type`.
+
+## .spec.kubernetes.nodePools.ami.type
+
+### Description
+
+The AMI type defines the AMI to use for `eks-managed` and `self-managed` types of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time as `ami.id` and `ami.owner`.
+
+### Constraints
+
+**enum**: the value of this property must be equal to one of the following string values:
+
+| Value |
+|:-------------|
+|`"alinux2"` |
+|`"alinux2023"`|
## .spec.kubernetes.nodePools.attachedTargetGroups
### Description
-This optional array defines additional target groups to attach to the instances in the node pool
+This optional array defines additional target groups to attach to the instances in the node pool.
### Constraints
@@ -5238,11 +5617,11 @@ This optional array defines additional target groups to attach to the instances
### Description
-The container runtime to use for the nodes
+The container runtime to use in the nodes of the node pool. Default is `containerd`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------|
@@ -5261,31 +5640,45 @@ The container runtime to use for the nodes
| [volumeSize](#speckubernetesnodepoolsinstancevolumesize) | `integer` | Optional |
| [volumeType](#speckubernetesnodepoolsinstancevolumetype) | `string` | Optional |
+### Description
+
+Configuration for the instances that will be used in the node pool.
+
## .spec.kubernetes.nodePools.instance.maxPods
+### Description
+
+Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.
+
+Ref: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt
+
## .spec.kubernetes.nodePools.instance.spot
### Description
-If true, the nodes will be created as spot instances
+If `true`, the nodes will be created as spot instances. Default is `false`.
## .spec.kubernetes.nodePools.instance.type
### Description
-The instance type to use for the nodes
+The instance type to use for the nodes.
## .spec.kubernetes.nodePools.instance.volumeSize
### Description
-The size of the disk in GB
+The size of the disk in GB.
## .spec.kubernetes.nodePools.instance.volumeType
+### Description
+
+Volume type for the instance disk. Default is `gp2`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-----------|
@@ -5298,13 +5691,13 @@ The size of the disk in GB
### Description
-Kubernetes labels that will be added to the nodes
+Kubernetes labels that will be added to the nodes.
## .spec.kubernetes.nodePools.name
### Description
-The name of the node pool
+The name of the node pool.
## .spec.kubernetes.nodePools.size
@@ -5319,19 +5712,19 @@ The name of the node pool
### Description
-The maximum number of nodes in the node pool
+The maximum number of nodes in the node pool.
## .spec.kubernetes.nodePools.size.min
### Description
-The minimum number of nodes in the node pool
+The minimum number of nodes in the node pool.
## .spec.kubernetes.nodePools.subnetIds
### Description
-This value defines the subnet IDs where the nodes will be created
+Optional list of subnet IDs where to create the nodes.
### Constraints
@@ -5347,7 +5740,7 @@ This value defines the subnet IDs where the nodes will be created
### Description
-AWS tags that will be added to the ASG and EC2 instances
+AWS tags that will be added to the ASG and EC2 instances.
## .spec.kubernetes.nodePools.taints
@@ -5363,9 +5756,13 @@ AWS tags that will be added to the ASG and EC2 instances
## .spec.kubernetes.nodePools.type
+### Description
+
+The type of Node Pool, can be `self-managed` for using customizations like a custom AMI or setting the max pods per node, or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------------|
@@ -5376,11 +5773,11 @@ AWS tags that will be added to the ASG and EC2 instances
### Description
-Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim.
+Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------------------|
@@ -5392,7 +5789,7 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u
### Description
-This value defines the CIDR that will be used to assign IP addresses to the services
+This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services.
### Constraints
@@ -5408,7 +5805,7 @@ This value defines the CIDR that will be used to assign IP addresses to the serv
### Description
-This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created.
### Constraints
@@ -5424,7 +5821,7 @@ This value defines the subnet IDs where the EKS cluster will be created, require
### Description
-This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created.
### Constraints
@@ -5440,7 +5837,7 @@ This value defines the VPC ID where the EKS cluster will be created, required on
### Description
-Overrides the default IAM role name prefix for the EKS workers
+Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name.
### Constraints
@@ -5474,14 +5871,15 @@ Overrides the default IAM role name prefix for the EKS workers
### Properties
-| Property | Type | Required |
-|:-----------------------------------------------|:---------|:---------|
-| [chart](#specpluginshelmreleaseschart) | `string` | Required |
-| [name](#specpluginshelmreleasesname) | `string` | Required |
-| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
-| [set](#specpluginshelmreleasesset) | `array` | Optional |
-| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
-| [version](#specpluginshelmreleasesversion) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------|:----------|:---------|
+| [chart](#specpluginshelmreleaseschart) | `string` | Required |
+| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional |
+| [name](#specpluginshelmreleasesname) | `string` | Required |
+| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
+| [set](#specpluginshelmreleasesset) | `array` | Optional |
+| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
+| [version](#specpluginshelmreleasesversion) | `string` | Optional |
## .spec.plugins.helm.releases.chart
@@ -5489,6 +5887,12 @@ Overrides the default IAM role name prefix for the EKS workers
The chart of the release
+## .spec.plugins.helm.releases.disableValidationOnInstall
+
+### Description
+
+Disable running `helm diff` validation when installing the plugin; it will still be done when upgrading.
+
## .spec.plugins.helm.releases.name
### Description
@@ -5578,9 +5982,13 @@ The name of the kustomize plugin
## .spec.region
+### Description
+
+Defines in which AWS region the cluster and all the related resources will be created.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-----------------|
@@ -5628,6 +6036,10 @@ This map defines which will be the common tags that will be added to all the res
|:----------------------------------------------|:---------|:---------|
| [terraform](#spectoolsconfigurationterraform) | `object` | Required |
+### Description
+
+Configuration for tools used by furyctl, like Terraform.
+
## .spec.toolsConfiguration.terraform
### Properties
@@ -5644,6 +6056,10 @@ This map defines which will be the common tags that will be added to all the res
|:----------------------------------------------|:---------|:---------|
| [s3](#spectoolsconfigurationterraformstates3) | `object` | Required |
+### Description
+
+Configuration for storing the Terraform state of the cluster.
+
## .spec.toolsConfiguration.terraform.state.s3
### Properties
@@ -5655,17 +6071,21 @@ This map defines which will be the common tags that will be added to all the res
| [region](#spectoolsconfigurationterraformstates3region) | `string` | Required |
| [skipRegionValidation](#spectoolsconfigurationterraformstates3skipregionvalidation) | `boolean` | Optional |
+### Description
+
+Configuration for the S3 bucket used to store the Terraform state.
+
## .spec.toolsConfiguration.terraform.state.s3.bucketName
### Description
-This value defines which bucket will be used to store all the states
+This value defines which bucket will be used to store all the states.
## .spec.toolsConfiguration.terraform.state.s3.keyPrefix
### Description
-This value defines which folder will be used to store all the states inside the bucket
+This value defines which folder will be used to store all the states inside the bucket.
### Constraints
@@ -5683,11 +6103,11 @@ This value defines which folder will be used to store all the states inside the
### Description
-This value defines in which region the bucket is located
+This value defines in which region the bucket is located.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-----------------|
@@ -5725,5 +6145,5 @@ This value defines in which region the bucket is located
### Description
-This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region
+This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region.
diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md
index 6118a1540..63db395b1 100644
--- a/docs/schemas/kfddistribution-kfd-v1alpha2.md
+++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md
@@ -2,8 +2,14 @@
This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster.
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+```bash
+furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.
## Properties
| Property | Type | Required |
@@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
| [metadata](#metadata) | `object` | Required |
| [spec](#spec) | `object` | Required |
+### Description
+
+KFD modules deployed on top of an existing Kubernetes cluster.
+
## .apiVersion
### Constraints
@@ -29,7 +39,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------------|
@@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
## .metadata.name
+### Description
+
+The name of the cluster. It will also be used as a prefix for all the other resources created.
+
### Constraints
**maximum length**: the maximum number of characters for this string is: `56`
@@ -84,11 +98,15 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional |
| [tolerations](#specdistributioncommontolerations) | `array` | Optional |
+### Description
+
+Common configuration for all the distribution modules.
+
## .spec.distribution.common.nodeSelector
### Description
-The node selector to use to place the pods for all the KFD modules
+The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
## .spec.distribution.common.provider
@@ -102,13 +120,13 @@ The node selector to use to place the pods for all the KFD modules
### Description
-The type of the provider
+The provider type. Don't set. FOR INTERNAL USE ONLY.
## .spec.distribution.common.registry
### Description
-URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).
+URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).
NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too.
@@ -116,7 +134,7 @@ NOTE: If plugins are pulling from the default registry, the registry will be rep
### Description
-The relative path to the vendor directory, does not need to be changed
+The relative path to the vendor directory, does not need to be changed.
## .spec.distribution.common.tolerations
@@ -131,13 +149,19 @@ The relative path to the vendor directory, does not need to be changed
### Description
-The tolerations that will be added to the pods for all the KFD modules
+An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:
+
+```yaml
+- effect: NoSchedule
+ key: node.kubernetes.io/role
+ value: infra
+```
## .spec.distribution.common.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -155,7 +179,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -202,7 +226,7 @@ The behavior of the configmap
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -410,7 +434,7 @@ The behavior of the secret
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -493,7 +517,7 @@ The type of the secret
### Description
-The kubeconfig file path
+The path to the kubeconfig file.
## .spec.distribution.modules
@@ -522,11 +546,15 @@ The kubeconfig file path
| [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional |
| [provider](#specdistributionmodulesauthprovider) | `object` | Required |
+### Description
+
+Configuration for the Auth module.
+
## .spec.distribution.modules.auth.baseDomain
### Description
-The base domain for the auth module
+The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class.
## .spec.distribution.modules.auth.dex
@@ -539,17 +567,32 @@ The base domain for the auth module
| [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional |
| [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional |
+### Description
+
+Configuration for the Dex package.
+
## .spec.distribution.modules.auth.dex.additionalStaticClients
### Description
-The additional static clients for dex
+Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:
+
+```yaml
+additionalStaticClients:
+ - id: my-custom-client
+ name: "A custom additional static client"
+ redirectURIs:
+ - "https://myapp.tld/redirect"
+ - "https://alias.tld/oidc-callback"
+ secret: supersecretpassword
+```
+Reference: https://dexidp.io/docs/connectors/local/
## .spec.distribution.modules.auth.dex.connectors
### Description
-The connectors for dex
+A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/
## .spec.distribution.modules.auth.dex.expiry
@@ -585,7 +628,7 @@ Dex signing key expiration time duration (default 6h).
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.auth.dex.overrides.tolerations
@@ -600,13 +643,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.auth.dex.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -624,7 +667,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -647,13 +690,21 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the Auth module.
+
## .spec.distribution.modules.auth.overrides.ingresses
+### Description
+
+Override the definition of the Auth module ingresses.
+
## .spec.distribution.modules.auth.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the auth module
+Set to override the node selector used to place the pods of the Auth module.
## .spec.distribution.modules.auth.overrides.tolerations
@@ -668,13 +719,13 @@ The node selector to use to place the pods for the auth module
### Description
-The tolerations that will be added to the pods for the auth module
+Set to override the tolerations that will be added to the pods of the Auth module.
## .spec.distribution.modules.auth.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -692,7 +743,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -788,7 +839,7 @@ override default routes for KFD components
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -802,7 +853,7 @@ override default routes for KFD components
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -892,27 +943,36 @@ cat ec_private.pem | base64
| [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required |
| [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required |
+### Description
+
+Configuration for the HTTP Basic Auth provider.
+
## .spec.distribution.modules.auth.provider.basicAuth.password
### Description
-The password for the basic auth
+The password for logging in with the HTTP basic authentication.
## .spec.distribution.modules.auth.provider.basicAuth.username
### Description
-The username for the basic auth
+The username for logging in with the HTTP basic authentication.
## .spec.distribution.modules.auth.provider.type
### Description
-The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
+The type of the Auth provider, options are:
+- `none`: will disable authentication in the infrastructural ingresses.
+- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.
+- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.
+
+Default is `none`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------|
@@ -930,6 +990,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
| [type](#specdistributionmodulesdrtype) | `string` | Required |
| [velero](#specdistributionmodulesdrvelero) | `object` | Optional |
+### Description
+
+Configuration for the Disaster Recovery module.
+
## .spec.distribution.modules.dr.overrides
### Properties
@@ -940,13 +1004,17 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
| [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.dr.overrides.ingresses
## .spec.distribution.modules.dr.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.dr.overrides.tolerations
@@ -961,13 +1029,13 @@ The node selector to use to place the pods for the security module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.dr.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -985,7 +1053,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1002,11 +1070,13 @@ The value of the toleration
### Description
-The type of the DR, must be ***none*** or ***on-premises***
+The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.
+
+Default is `none`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------------|
@@ -1017,12 +1087,17 @@ The type of the DR, must be ***none*** or ***on-premises***
### Properties
-| Property | Type | Required |
-|:---------------------------------------------------------------------|:---------|:---------|
-| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional |
-| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional |
-| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional |
-| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional |
+| Property | Type | Required |
+|:-------------------------------------------------------------------------|:---------|:---------|
+| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional |
+| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional |
+| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional |
+| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional |
+| [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional |
+
+### Description
+
+Configuration for the Velero package.
## .spec.distribution.modules.dr.velero.backend
@@ -1032,7 +1107,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1098,7 +1173,7 @@ The secret access key (password) for the external S3-compatible bucket.
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.dr.velero.overrides.tolerations
@@ -1113,13 +1188,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.dr.velero.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1137,7 +1212,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1154,52 +1229,108 @@ The value of the toleration
### Properties
-| Property | Type | Required |
-|:------------------------------------------------------------|:----------|:---------|
-| [cron](#specdistributionmodulesdrveleroschedulescron) | `object` | Optional |
-| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
-| [ttl](#specdistributionmodulesdrveleroschedulesttl) | `string` | Optional |
+| Property | Type | Required |
+|:--------------------------------------------------------------------|:----------|:---------|
+| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional |
+| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
### Description
Configuration for Velero's backup schedules.
-## .spec.distribution.modules.dr.velero.schedules.cron
+## .spec.distribution.modules.dr.velero.schedules.definitions
### Properties
-| Property | Type | Required |
-|:--------------------------------------------------------------------|:---------|:---------|
-| [full](#specdistributionmodulesdrveleroschedulescronfull) | `string` | Optional |
-| [manifests](#specdistributionmodulesdrveleroschedulescronmanifests) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------|:---------|:---------|
+| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional |
+| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional |
### Description
-Configuration for Velero's schedules cron.
+Configuration for Velero schedules.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full
-## .spec.distribution.modules.dr.velero.schedules.cron.full
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------------------|:----------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional |
+| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's full backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule
### Description
The cron expression for the `full` backup schedule (default `0 1 * * *`).
-## .spec.distribution.modules.dr.velero.schedules.cron.manifests
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData
+
+### Description
+
+EXPERIMENTAL (if you do more than one backup, the backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests
+
+### Properties
+
+| Property | Type | Required |
+|:----------------------------------------------------------------------------------|:---------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's manifests backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule
### Description
The cron expression for the `manifests` backup schedule (default `*/15 * * * *`).
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+
## .spec.distribution.modules.dr.velero.schedules.install
### Description
Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`.
-## .spec.distribution.modules.dr.velero.schedules.ttl
+## .spec.distribution.modules.dr.velero.snapshotController
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------|:----------|:---------|
+| [install](#specdistributionmodulesdrvelerosnapshotcontrollerinstall) | `boolean` | Optional |
### Description
-The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+Configuration for the additional snapshotController component installation.
+
+## .spec.distribution.modules.dr.velero.snapshotController.install
+
+### Description
+
+Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in.
## .spec.distribution.modules.ingress
@@ -1217,7 +1348,7 @@ The Time To Live (TTL) of the backups created by the backup schedules (default `
### Description
-the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone
+The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class.
## .spec.distribution.modules.ingress.certManager
@@ -1228,6 +1359,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati
| [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required |
| [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional |
+### Description
+
+Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer
### Properties
@@ -1239,33 +1374,37 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati
| [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional |
| [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional |
+### Description
+
+Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.
+
## .spec.distribution.modules.ingress.certManager.clusterIssuer.email
### Description
-The email of the cluster issuer
+The email address to use during the certificate issuing process.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.name
### Description
-The name of the cluster issuer
+The name of the clusterIssuer.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers
### Description
-The custom solvers configurations
+The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.type
### Description
-The type of the cluster issuer, must be ***http01***
+The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1284,7 +1423,7 @@ The type of the cluster issuer, must be ***http01***
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.certManager.overrides.tolerations
@@ -1299,13 +1438,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1323,7 +1462,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1357,7 +1496,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations
@@ -1372,13 +1511,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1396,7 +1535,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1421,7 +1560,7 @@ The value of the toleration
### Description
-Configurations for the nginx ingress controller module
+Configurations for the Ingress nginx controller package.
## .spec.distribution.modules.ingress.nginx.overrides
@@ -1436,7 +1575,7 @@ Configurations for the nginx ingress controller module
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.ingress.nginx.overrides.tolerations
@@ -1451,13 +1590,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1475,7 +1614,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1501,11 +1640,11 @@ The value of the toleration
### Description
-The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***
+The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------------|
@@ -1523,25 +1662,42 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or **
| [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required |
| [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required |
+### Description
+
+Kubernetes TLS secret for the ingresses TLS certificate.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.ca
+### Description
+
+The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.tls.secret.cert
### Description
-The certificate file content or you can use the file notation to get the content from a file
+The certificate file's content. You can use the `"{file://}"` notation to get the content from a file.
## .spec.distribution.modules.ingress.nginx.tls.secret.key
+### Description
+
+The signing key file's content. You can use the `"{file://}"` notation to get the content from a file.
+
## .spec.distribution.modules.ingress.nginx.type
### Description
-The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***
+The type of the Ingress nginx controller, options are:
+- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.
+- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.
+- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.
+
+Default is `single`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1559,6 +1715,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **
| [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the Ingress module.
+
## .spec.distribution.modules.ingress.overrides.ingresses
### Properties
@@ -1581,25 +1741,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or **
### Description
-If true, the ingress will not have authentication
+If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth.
## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host
### Description
-The host of the ingress
+Use this host for the ingress instead of the default one.
## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass
### Description
-The ingress class of the ingress
+Use this ingress class for the ingress instead of the default one.
## .spec.distribution.modules.ingress.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the ingress module
+Set to override the node selector used to place the pods of the Ingress module.
## .spec.distribution.modules.ingress.overrides.tolerations
@@ -1614,13 +1774,13 @@ The node selector to use to place the pods for the ingress module
### Description
-The tolerations that will be added to the pods for the ingress module
+Set to override the tolerations that will be added to the pods of the Ingress module.
## .spec.distribution.modules.ingress.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1638,7 +1798,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1666,6 +1826,10 @@ The value of the toleration
| [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional |
| [type](#specdistributionmodulesloggingtype) | `string` | Required |
+### Description
+
+Configuration for the Logging module.
+
## .spec.distribution.modules.logging.cerebro
### Properties
@@ -1674,6 +1838,10 @@ The value of the toleration
|:-------------------------------------------------------------|:---------|:---------|
| [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional |
+### Description
+
+DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
+
## .spec.distribution.modules.logging.cerebro.overrides
### Properties
@@ -1687,7 +1855,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.cerebro.overrides.tolerations
@@ -1702,13 +1870,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1726,7 +1894,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1756,55 +1924,55 @@ The value of the toleration
### Description
-when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.
+When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.
## .spec.distribution.modules.logging.customOutputs.audit
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.errors
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.events
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.infra
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.ingressNginx
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.kubernetes
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.systemdCommon
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.customOutputs.systemdEtcd
### Description
-This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow.
+This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`
## .spec.distribution.modules.logging.loki
@@ -1815,12 +1983,21 @@ This value defines where the output from Flow will be sent. Will be the `spec` s
| [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional |
| [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional |
| [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional |
+| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required |
+
+### Description
+
+Configuration for the Loki package.
## .spec.distribution.modules.logging.loki.backend
+### Description
+
+The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1839,35 +2016,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s
| [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional |
+### Description
+
+Configuration for Loki's external storage backend.
+
## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId
### Description
-The access key id of the loki external endpoint
+The access key ID (username) for the external S3-compatible bucket.
## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName
### Description
-The bucket name of the loki external endpoint
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint
### Description
-The endpoint of the loki external endpoint
+External S3-compatible endpoint for Loki's storage.
## .spec.distribution.modules.logging.loki.externalEndpoint.insecure
### Description
-If true, the loki external endpoint will be insecure
+If true, will use HTTP as protocol instead of HTTPS.
## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey
### Description
-The secret access key of the loki external endpoint
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.logging.loki.resources
@@ -1891,13 +2072,13 @@ The secret access key of the loki external endpoint
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.logging.loki.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.logging.loki.resources.requests
@@ -1912,13 +2093,23 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.logging.loki.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
+
+## .spec.distribution.modules.logging.loki.tsdbStartDate
+
+### Description
+
+Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.
+
+The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.
+
+Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`.
## .spec.distribution.modules.logging.minio
@@ -1930,6 +2121,10 @@ The memory request for the opensearch pods
| [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Logging's MinIO deployment.
+
## .spec.distribution.modules.logging.minio.overrides
### Properties
@@ -1943,7 +2138,7 @@ The memory request for the opensearch pods
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.minio.overrides.tolerations
@@ -1958,13 +2153,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.minio.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1982,7 +2177,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2008,19 +2203,19 @@ The value of the toleration
### Description
-The password of the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.logging.minio.rootUser.username
### Description
-The username of the minio root user
+The username for the default MinIO root user.
## .spec.distribution.modules.logging.minio.storageSize
### Description
-The PVC size for each minio disk, 6 disks total
+The PVC size for each MinIO disk, 6 disks total.
## .spec.distribution.modules.logging.opensearch
@@ -2046,7 +2241,7 @@ The PVC size for each minio disk, 6 disks total
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.opensearch.overrides.tolerations
@@ -2061,13 +2256,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2085,7 +2280,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2120,13 +2315,13 @@ The value of the toleration
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.logging.opensearch.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.logging.opensearch.resources.requests
@@ -2141,29 +2336,29 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.logging.opensearch.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.logging.opensearch.storageSize
### Description
-The storage size for the opensearch pods
+The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`.
## .spec.distribution.modules.logging.opensearch.type
### Description
-The type of the opensearch, must be ***single*** or ***triple***
+The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2178,6 +2373,10 @@ The type of the opensearch, must be ***single*** or ***triple***
|:--------------------------------------------------------------|:---------|:---------|
| [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional |
+### Description
+
+Configuration for the Logging Operator.
+
## .spec.distribution.modules.logging.operator.overrides
### Properties
@@ -2191,7 +2390,7 @@ The type of the opensearch, must be ***single*** or ***triple***
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.logging.operator.overrides.tolerations
@@ -2206,13 +2405,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.logging.operator.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2230,7 +2429,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2253,13 +2452,17 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.logging.overrides.ingresses
## .spec.distribution.modules.logging.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.logging.overrides.tolerations
@@ -2274,13 +2477,13 @@ The node selector to use to place the pods for the security module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.logging.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2298,7 +2501,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2315,11 +2518,17 @@ The value of the toleration
### Description
-selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------------|
@@ -2348,7 +2557,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C
### Description
-configuration for the Monitoring module components
+Configuration for the Monitoring module.
## .spec.distribution.modules.monitoring.alertmanager
@@ -2364,19 +2573,19 @@ configuration for the Monitoring module components
### Description
-The webhook url to send deadman switch monitoring, for example to use with healthchecks.io
+The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io.
## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules
### Description
-If true, the default rules will be installed
+Set to false to avoid installing the Prometheus rules (alerts) included with the distribution.
## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl
### Description
-The slack webhook url to send alerts
+The Slack webhook URL where to send the infrastructural and workload alerts to.
## .spec.distribution.modules.monitoring.blackboxExporter
@@ -2399,7 +2608,7 @@ The slack webhook url to send alerts
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations
@@ -2414,13 +2623,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2438,7 +2647,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2482,7 +2691,7 @@ Notice that by default anonymous access is enabled.
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations
@@ -2497,13 +2706,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2521,7 +2730,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2567,7 +2776,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations
@@ -2582,13 +2791,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2606,7 +2815,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2630,15 +2839,19 @@ The value of the toleration
| [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional |
| [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional |
+### Description
+
+Configuration for the Mimir package.
+
## .spec.distribution.modules.monitoring.mimir.backend
### Description
-The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2657,35 +2870,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
| [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional |
+### Description
+
+Configuration for Mimir's external storage backend.
+
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId
### Description
-The access key id of the external mimir backend
+The access key ID (username) for the external S3-compatible bucket.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName
### Description
-The bucket name of the external mimir backend
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint
### Description
-The endpoint of the external mimir backend
+The external S3-compatible endpoint for Mimir's storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure
### Description
-If true, the external mimir backend will not use tls
+If true, will use HTTP as protocol instead of HTTPS.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey
### Description
-The secret access key of the external mimir backend
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.monitoring.mimir.overrides
@@ -2700,7 +2917,7 @@ The secret access key of the external mimir backend
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations
@@ -2715,13 +2932,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2739,7 +2956,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2756,7 +2973,7 @@ The value of the toleration
### Description
-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.
## .spec.distribution.modules.monitoring.minio
@@ -2768,6 +2985,10 @@ The retention time for the mimir pods
| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Monitoring's MinIO deployment.
+
## .spec.distribution.modules.monitoring.minio.overrides
### Properties
@@ -2781,7 +3002,7 @@ The retention time for the mimir pods
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.minio.overrides.tolerations
@@ -2796,13 +3017,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2820,7 +3041,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2846,19 +3067,19 @@ The value of the toleration
### Description
-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.rootUser.username
### Description
-The username for the minio root user
+The username for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.storageSize
### Description
-The storage size for the minio pods
+The PVC size for each MinIO disk, 6 disks total.
## .spec.distribution.modules.monitoring.overrides
@@ -2870,13 +3091,17 @@ The storage size for the minio pods
| [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.monitoring.overrides.ingresses
## .spec.distribution.modules.monitoring.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.monitoring.overrides.tolerations
@@ -2891,13 +3116,13 @@ The node selector to use to place the pods for the security module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.monitoring.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2915,7 +3140,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2970,13 +3195,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.monitoring.prometheus.resources.requests
@@ -2991,31 +3216,31 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.monitoring.prometheus.retentionSize
### Description
-The retention size for the k8s Prometheus instance.
+The retention size for the `k8s` Prometheus instance.
## .spec.distribution.modules.monitoring.prometheus.retentionTime
### Description
-The retention time for the K8s Prometheus instance.
+The retention time for the `k8s` Prometheus instance.
## .spec.distribution.modules.monitoring.prometheus.storageSize
### Description
-The storage size for the k8s Prometheus instance.
+The storage size for the `k8s` Prometheus instance.
## .spec.distribution.modules.monitoring.prometheusAgent
@@ -3056,13 +3281,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory
### Description
-The memory limit for the opensearch pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests
@@ -3077,28 +3302,30 @@ The memory limit for the opensearch pods
### Description
-The cpu request for the prometheus pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory
### Description
-The memory request for the opensearch pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.monitoring.type
### Description
-The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.
+The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.
- `none`: will disable the whole monitoring stack.
-- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
-- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
+- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+
+Default is `prometheus`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------------|
@@ -3128,7 +3355,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations
@@ -3143,13 +3370,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3167,7 +3394,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3191,6 +3418,10 @@ The value of the toleration
| [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional |
| [type](#specdistributionmodulesnetworkingtype) | `string` | Required |
+### Description
+
+Configuration for the Networking module.
+
## .spec.distribution.modules.networking.cilium
### Properties
@@ -3203,6 +3434,10 @@ The value of the toleration
## .spec.distribution.modules.networking.cilium.maskSize
+### Description
+
+The mask size to use for the Pods network on each node.
+
## .spec.distribution.modules.networking.cilium.overrides
### Properties
@@ -3216,7 +3451,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.networking.cilium.overrides.tolerations
@@ -3231,13 +3466,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3255,7 +3490,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3270,6 +3505,10 @@ The value of the toleration
## .spec.distribution.modules.networking.cilium.podCidr
+### Description
+
+Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`.
+
### Constraints
**pattern**: the string must match the following regular expression:
@@ -3290,13 +3529,17 @@ The value of the toleration
| [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.networking.overrides.ingresses
## .spec.distribution.modules.networking.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.networking.overrides.tolerations
@@ -3311,13 +3554,13 @@ The node selector to use to place the pods for the security module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.networking.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3335,7 +3578,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3369,7 +3612,7 @@ The value of the toleration
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations
@@ -3384,13 +3627,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3408,7 +3651,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3425,11 +3668,11 @@ The value of the toleration
### Description
-The type of networking to use, either ***none***, ***calico*** or ***cilium***
+The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3448,6 +3691,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium***
| [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional |
| [type](#specdistributionmodulespolicytype) | `string` | Required |
+### Description
+
+Configuration for the Policy module.
+
## .spec.distribution.modules.policy.gatekeeper
### Properties
@@ -3459,6 +3706,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium***
| [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required |
| [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional |
+### Description
+
+Configuration for the Gatekeeper package.
+
## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces
### Description
@@ -3469,11 +3720,11 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en
### Description
-The enforcement action to use for the gatekeeper module
+The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3485,7 +3736,7 @@ The enforcement action to use for the gatekeeper module
### Description
-If true, the default policies will be installed
+Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution.
## .spec.distribution.modules.policy.gatekeeper.overrides
@@ -3500,7 +3751,7 @@ If true, the default policies will be installed
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations
@@ -3515,13 +3766,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3539,7 +3790,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3563,17 +3814,21 @@ The value of the toleration
| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |
+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces
### Description
-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.
## .spec.distribution.modules.policy.kyverno.installDefaultPolicies
### Description
-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.
## .spec.distribution.modules.policy.kyverno.overrides
@@ -3588,7 +3843,7 @@ If true, the default policies will be installed
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations
@@ -3603,13 +3858,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3627,7 +3882,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3644,11 +3899,11 @@ The value of the toleration
### Description
-The validation failure action to use for the kyverno module
+The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -3665,13 +3920,17 @@ The validation failure action to use for the kyverno module
| [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.policy.overrides.ingresses
## .spec.distribution.modules.policy.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.policy.overrides.tolerations
@@ -3686,13 +3945,13 @@ The node selector to use to place the pods for the security module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.policy.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3710,7 +3969,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3727,11 +3986,13 @@ The value of the toleration
### Description
-The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***
+The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.
+
+Default is `none`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------|
@@ -3750,6 +4011,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno**
| [tempo](#specdistributionmodulestracingtempo) | `object` | Optional |
| [type](#specdistributionmodulestracingtype) | `string` | Required |
+### Description
+
+Configuration for the Tracing module.
+
## .spec.distribution.modules.tracing.minio
### Properties
@@ -3760,6 +4025,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno**
| [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Tracing's MinIO deployment.
+
## .spec.distribution.modules.tracing.minio.overrides
### Properties
@@ -3773,7 +4042,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno**
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.tracing.minio.overrides.tolerations
@@ -3788,13 +4057,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3812,7 +4081,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3838,19 +4107,19 @@ The value of the toleration
### Description
-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.tracing.minio.rootUser.username
### Description
-The username for the minio root user
+The username for the default MinIO root user.
## .spec.distribution.modules.tracing.minio.storageSize
### Description
-The storage size for the minio pods
+The PVC size for each MinIO disk, 6 disks total.
## .spec.distribution.modules.tracing.overrides
@@ -3862,13 +4131,17 @@ The storage size for the minio pods
| [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional |
+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.tracing.overrides.ingresses
## .spec.distribution.modules.tracing.overrides.nodeSelector
### Description
-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.
## .spec.distribution.modules.tracing.overrides.tolerations
@@ -3883,13 +4156,13 @@ The node selector to use to place the pods for the security module
### Description
-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.
## .spec.distribution.modules.tracing.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3907,7 +4180,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3931,15 +4204,19 @@ The value of the toleration
| [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional |
| [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional |
+### Description
+
+Configuration for the Tempo package.
+
## .spec.distribution.modules.tracing.tempo.backend
### Description
-The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
+The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3958,35 +4235,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
| [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional |
| [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional |
+### Description
+
+Configuration for Tempo's external storage backend.
+
## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId
### Description
-The access key id of the external tempo backend
+The access key ID (username) for the external S3-compatible bucket.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName
### Description
-The bucket name of the external tempo backend
+The bucket name of the external S3-compatible object storage.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint
### Description
-The endpoint of the external tempo backend
+The external S3-compatible endpoint for Tempo's storage.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure
### Description
-If true, the external tempo backend will not use tls
+If true, will use HTTP as protocol instead of HTTPS.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey
### Description
-The secret access key of the external tempo backend
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.tracing.tempo.overrides
@@ -4001,7 +4282,7 @@ The secret access key of the external tempo backend
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.tracing.tempo.overrides.tolerations
@@ -4016,13 +4297,13 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4040,7 +4321,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4057,17 +4338,19 @@ The value of the toleration
### Description
-The retention time for the tempo pods
+The retention time for the traces stored in Tempo.
## .spec.distribution.modules.tracing.type
### Description
-The type of tracing to use, either ***none*** or ***tempo***
+The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.
+
+Default is `tempo`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------|
@@ -4076,6 +4359,10 @@ The type of tracing to use, either ***none*** or ***tempo***
## .spec.distributionVersion
+### Description
+
+Defines which KFD version will be installed and, as a consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.
+
### Constraints
**minimum length**: the minimum number of characters for this string is: `1`
@@ -4102,14 +4389,15 @@ The type of tracing to use, either ***none*** or ***tempo***
### Properties
-| Property | Type | Required |
-|:-----------------------------------------------|:---------|:---------|
-| [chart](#specpluginshelmreleaseschart) | `string` | Required |
-| [name](#specpluginshelmreleasesname) | `string` | Required |
-| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
-| [set](#specpluginshelmreleasesset) | `array` | Optional |
-| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
-| [version](#specpluginshelmreleasesversion) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------|:----------|:---------|
+| [chart](#specpluginshelmreleaseschart) | `string` | Required |
+| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional |
+| [name](#specpluginshelmreleasesname) | `string` | Required |
+| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
+| [set](#specpluginshelmreleasesset) | `array` | Optional |
+| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
+| [version](#specpluginshelmreleasesversion) | `string` | Optional |
## .spec.plugins.helm.releases.chart
@@ -4117,6 +4405,12 @@ The type of tracing to use, either ***none*** or ***tempo***
The chart of the release
+## .spec.plugins.helm.releases.disableValidationOnInstall
+
+### Description
+
+Disable running `helm diff` validation when installing the plugin; it will still be done when upgrading.
+
## .spec.plugins.helm.releases.name
### Description
diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md
index f620b0661..9bb0ae9d0 100644
--- a/docs/schemas/onpremises-kfd-v1alpha2.md
+++ b/docs/schemas/onpremises-kfd-v1alpha2.md
@@ -2,8 +2,14 @@
This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises.
-An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl).
+An example configuration file can be created by running the following command:
+```bash
+furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster
+```
+
+> [!NOTE]
+> Replace the version with your desired version of KFD.
## Properties
| Property | Type | Required |
@@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
| [metadata](#metadata) | `object` | Required |
| [spec](#spec) | `object` | Required |
+### Description
+
+A KFD Cluster deployed on top of a set of existing VMs.
+
## .apiVersion
### Constraints
@@ -29,7 +39,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------|
@@ -80,23 +90,30 @@ The name of the cluster. It will also be used as a prefix for all the other reso
### Properties
-| Property | Type | Required |
-|:----------------------------------------------------------------|:---------|:---------|
-| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional |
-| [provider](#specdistributioncommonprovider) | `object` | Optional |
-| [registry](#specdistributioncommonregistry) | `string` | Optional |
-| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional |
-| [tolerations](#specdistributioncommontolerations) | `array` | Optional |
+| Property | Type | Required |
+|:------------------------------------------------------------------------|:----------|:---------|
+| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional |
+| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional |
+| [provider](#specdistributioncommonprovider) | `object` | Optional |
+| [registry](#specdistributioncommonregistry) | `string` | Optional |
+| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional |
+| [tolerations](#specdistributioncommontolerations) | `array` | Optional |
### Description
Common configuration for all the distribution modules.
+## .spec.distribution.common.networkPoliciesEnabled
+
+### Description
+
+EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided for core modules.
+
## .spec.distribution.common.nodeSelector
### Description
-The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`
+The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
## .spec.distribution.common.provider
@@ -118,6 +135,8 @@ The provider type. Don't set. FOR INTERNAL USE ONLY.
URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).
+NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too.
+
## .spec.distribution.common.relativeVendorPath
### Description
@@ -149,7 +168,7 @@ An array with the tolerations that will be added to the pods for all the KFD mod
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -167,7 +186,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -214,7 +233,7 @@ The behavior of the configmap
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -422,7 +441,7 @@ The behavior of the secret
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -537,7 +556,7 @@ Configuration for the Auth module.
### Description
-Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class.
+The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class.
## .spec.distribution.modules.auth.dex
@@ -632,7 +651,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -650,7 +669,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -827,7 +846,7 @@ Set to override the tolerations that will be added to the pods of the Auth modul
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -845,7 +864,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -941,7 +960,7 @@ override default routes for KFD components
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -955,7 +974,7 @@ override default routes for KFD components
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1070,9 +1089,11 @@ The type of the Auth provider, options are:
- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.
- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.
+Default is `none`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------|
@@ -1135,7 +1156,7 @@ Set to override the tolerations that will be added to the pods of the module.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1153,7 +1174,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1172,9 +1193,11 @@ The value of the toleration
The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.
+Default is `none`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------------|
@@ -1185,12 +1208,13 @@ The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disab
### Properties
-| Property | Type | Required |
-|:---------------------------------------------------------------------|:---------|:---------|
-| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional |
-| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional |
-| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional |
-| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional |
+| Property | Type | Required |
+|:-------------------------------------------------------------------------|:---------|:---------|
+| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional |
+| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional |
+| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional |
+| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional |
+| [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional |
### Description
@@ -1204,7 +1228,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1291,7 +1315,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1309,7 +1333,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1326,52 +1350,108 @@ The value of the toleration
### Properties
-| Property | Type | Required |
-|:------------------------------------------------------------|:----------|:---------|
-| [cron](#specdistributionmodulesdrveleroschedulescron) | `object` | Optional |
-| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
-| [ttl](#specdistributionmodulesdrveleroschedulesttl) | `string` | Optional |
+| Property | Type | Required |
+|:--------------------------------------------------------------------|:----------|:---------|
+| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional |
+| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional |
### Description
Configuration for Velero's backup schedules.
-## .spec.distribution.modules.dr.velero.schedules.cron
+## .spec.distribution.modules.dr.velero.schedules.definitions
### Properties
-| Property | Type | Required |
-|:--------------------------------------------------------------------|:---------|:---------|
-| [full](#specdistributionmodulesdrveleroschedulescronfull) | `string` | Optional |
-| [manifests](#specdistributionmodulesdrveleroschedulescronmanifests) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------|:---------|:---------|
+| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional |
+| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional |
### Description
-Configuration for Velero's schedules cron.
+Configuration for Velero schedules.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full
-## .spec.distribution.modules.dr.velero.schedules.cron.full
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------------------|:----------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional |
+| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's full backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule
### Description
The cron expression for the `full` backup schedule (default `0 1 * * *`).
-## .spec.distribution.modules.dr.velero.schedules.cron.manifests
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData
+
+### Description
+
+EXPERIMENTAL (if you do more than one backup, the backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests
+
+### Properties
+
+| Property | Type | Required |
+|:----------------------------------------------------------------------------------|:---------|:---------|
+| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional |
+| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional |
+
+### Description
+
+Configuration for Velero's manifests backup schedule.
+
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule
### Description
The cron expression for the `manifests` backup schedule (default `*/15 * * * *`).
+## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl
+
+### Description
+
+The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+
## .spec.distribution.modules.dr.velero.schedules.install
### Description
Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`.
-## .spec.distribution.modules.dr.velero.schedules.ttl
+## .spec.distribution.modules.dr.velero.snapshotController
+
+### Properties
+
+| Property | Type | Required |
+|:---------------------------------------------------------------------|:----------|:---------|
+| [install](#specdistributionmodulesdrvelerosnapshotcontrollerinstall) | `boolean` | Optional |
### Description
-The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL.
+Configuration for the additional snapshotController component installation.
+
+## .spec.distribution.modules.dr.velero.snapshotController.install
+
+### Description
+
+Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in.
## .spec.distribution.modules.ingress
@@ -1390,7 +1470,7 @@ The Time To Live (TTL) of the backups created by the backup schedules (default `
### Description
-The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class.
+The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class.
## .spec.distribution.modules.ingress.certManager
@@ -1430,13 +1510,13 @@ The email address to use during the certificate issuing process.
### Description
-Name of the clusterIssuer
+The name of the clusterIssuer.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers
### Description
-List of challenge solvers to use instead of the default one for the `http01` challenge.
+The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field.
## .spec.distribution.modules.ingress.certManager.clusterIssuer.type
@@ -1446,7 +1526,7 @@ The type of the clusterIssuer. Only `http01` challenge is supported for on-premi
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1486,7 +1566,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1504,7 +1584,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1559,7 +1639,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1577,7 +1657,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1602,7 +1682,7 @@ The value of the toleration
### Description
-Configurations for the nginx ingress controller package.
+Configurations for the Ingress nginx controller package.
## .spec.distribution.modules.ingress.nginx.overrides
@@ -1638,7 +1718,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1656,7 +1736,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1686,7 +1766,7 @@ The provider of the TLS certificates for the ingresses, one of: `none`, `certMan
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------------|
@@ -1730,14 +1810,16 @@ The signing key file's content. You can use the `"{file://}"` notation to
### Description
-The type of the nginx ingress controller, options are:
+The type of the Ingress nginx controller, options are:
- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.
- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.
- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.
+Default is `single`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1799,7 +1881,7 @@ Use this ingress class for the ingress instead of the default one.
### Description
-Set to override the node selector used to place the pods of the Ingress module
+Set to override the node selector used to place the pods of the Ingress module.
## .spec.distribution.modules.ingress.overrides.tolerations
@@ -1814,13 +1896,13 @@ Set to override the node selector used to place the pods of the Ingress module
### Description
-Set to override the tolerations that will be added to the pods of the Ingress module
+Set to override the tolerations that will be added to the pods of the Ingress module.
## .spec.distribution.modules.ingress.overrides.tolerations.effect
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1838,7 +1920,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -1882,7 +1964,7 @@ Configuration for the Logging module.
### Description
-DEPRECATED in latest versions of KFD.
+DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
## .spec.distribution.modules.logging.cerebro.overrides
@@ -1918,7 +2000,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -1936,7 +2018,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2025,6 +2107,7 @@ This value defines where the output from the `systemdEtcd` Flow will be sent. Th
| [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional |
| [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional |
| [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional |
+| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required |
### Description
@@ -2038,7 +2121,7 @@ The storage backend type for Loki. `minio` will use an in-cluster MinIO deployme
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2113,13 +2196,13 @@ The secret access key (password) for the external S3-compatible bucket.
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.logging.loki.resources.limits.memory
### Description
-The memory limit for the prometheus pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.logging.loki.resources.requests
@@ -2134,13 +2217,23 @@ The memory limit for the prometheus pods
### Description
-The cpu request for the loki pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.logging.loki.resources.requests.memory
### Description
-The memory request for the prometheus pods
+The memory request for the Pod. Example: `500M`.
+
+## .spec.distribution.modules.logging.loki.tsdbStartDate
+
+### Description
+
+Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.
+
+The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.
+
+Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`.
## .spec.distribution.modules.logging.minio
@@ -2190,7 +2283,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2208,7 +2301,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2293,7 +2386,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2311,7 +2404,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2346,13 +2439,13 @@ The value of the toleration
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.logging.opensearch.resources.limits.memory
### Description
-The memory limit for the prometheus pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.logging.opensearch.resources.requests
@@ -2367,19 +2460,19 @@ The memory limit for the prometheus pods
### Description
-The cpu request for the loki pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.logging.opensearch.resources.requests.memory
### Description
-The memory request for the prometheus pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.logging.opensearch.storageSize
### Description
-The storage size for the OpenSearch volumes.
+The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`.
## .spec.distribution.modules.logging.opensearch.type
@@ -2389,7 +2482,7 @@ The type of OpenSearch deployment. One of: `single` for a single replica or `tri
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2442,7 +2535,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2460,7 +2553,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2514,7 +2607,7 @@ Set to override the tolerations that will be added to the pods of the module.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2532,7 +2625,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2552,12 +2645,14 @@ The value of the toleration
Selects the logging stack. Options are:
- `none`: will disable the centralized logging.
- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
-- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.
-- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------------|
@@ -2602,7 +2697,7 @@ Configuration for the Monitoring module.
### Description
-The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io
+The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io.
## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules
@@ -2658,7 +2753,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2676,7 +2771,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2741,7 +2836,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2759,7 +2854,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2826,7 +2921,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2844,7 +2939,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -2880,7 +2975,7 @@ The storage backend type for Mimir. `minio` will use an in-cluster MinIO deploym
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2919,7 +3014,7 @@ The bucket name of the external S3-compatible object storage.
### Description
-External S3-compatible endpoint for Mimir's storage.
+The external S3-compatible endpoint for Mimir's storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure
@@ -2967,7 +3062,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -2985,7 +3080,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3052,7 +3147,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3070,7 +3165,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3151,7 +3246,7 @@ Set to override the tolerations that will be added to the pods of the module.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3169,7 +3264,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3224,13 +3319,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory
### Description
-The memory limit for the prometheus pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.monitoring.prometheus.resources.requests
@@ -3245,13 +3340,13 @@ The memory limit for the prometheus pods
### Description
-The cpu request for the loki pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory
### Description
-The memory request for the prometheus pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.monitoring.prometheus.retentionSize
@@ -3310,13 +3405,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver.
### Description
-The cpu limit for the loki pods
+The CPU limit for the Pod. Example: `1000m`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory
### Description
-The memory limit for the prometheus pods
+The memory limit for the Pod. Example: `1G`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests
@@ -3331,13 +3426,13 @@ The memory limit for the prometheus pods
### Description
-The cpu request for the loki pods
+The CPU request for the Pod, in cores. Example: `500m`.
## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory
### Description
-The memory request for the prometheus pods
+The memory request for the Pod. Example: `500M`.
## .spec.distribution.modules.monitoring.type
@@ -3346,13 +3441,15 @@ The memory request for the prometheus pods
The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.
- `none`: will disable the whole monitoring stack.
-- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
+- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
+- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+Default is `prometheus`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:------------------|
@@ -3403,7 +3500,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3421,7 +3518,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3499,7 +3596,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3517,7 +3614,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3587,7 +3684,7 @@ Set to override the tolerations that will be added to the pods of the module.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3605,7 +3702,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3660,7 +3757,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3678,7 +3775,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3695,11 +3792,11 @@ The value of the toleration
### Description
-The type of CNI plugin to use, either `calico` (default, via the Tigera Operator) or `cilium`.
+The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3750,7 +3847,7 @@ The default enforcement action to use for the included constraints. `deny` will
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3798,7 +3895,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3816,7 +3913,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3890,7 +3987,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3908,7 +4005,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -3929,7 +4026,7 @@ The validation failure action to use for the policies, `Enforce` will block when
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:----------|
@@ -3977,7 +4074,7 @@ Set to override the tolerations that will be added to the pods of the module.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -3995,7 +4092,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4014,9 +4111,11 @@ The value of the toleration
The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.
+Default is `none`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------|
@@ -4087,7 +4186,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4105,7 +4204,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4186,7 +4285,7 @@ Set to override the tolerations that will be added to the pods of the module.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4204,7 +4303,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4240,7 +4339,7 @@ The storage backend type for Tempo. `minio` will use an in-cluster MinIO deploym
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4279,7 +4378,7 @@ The bucket name of the external S3-compatible object storage.
### Description
-External S3-compatible endpoint for Tempo's storage.
+The external S3-compatible endpoint for Tempo's storage.
## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure
@@ -4327,7 +4426,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -4345,7 +4444,7 @@ The key of the toleration
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:---------|
@@ -4370,9 +4469,11 @@ The retention time for the traces stored in Tempo.
The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.
+Default is `tempo`.
+
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:--------|
@@ -4383,7 +4484,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr
### Description
-Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1.
+Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.
### Constraints
@@ -5004,7 +5105,7 @@ Name for the node group. It will be also used as the node role label. It should
### Constraints
-**enum**: the value of this property must be equal to one of the following values:
+**enum**: the value of this property must be equal to one of the following string values:
| Value |
|:-------------------|
@@ -5150,14 +5251,15 @@ The subnet CIDR to use for the Services network.
### Properties
-| Property | Type | Required |
-|:-----------------------------------------------|:---------|:---------|
-| [chart](#specpluginshelmreleaseschart) | `string` | Required |
-| [name](#specpluginshelmreleasesname) | `string` | Required |
-| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
-| [set](#specpluginshelmreleasesset) | `array` | Optional |
-| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
-| [version](#specpluginshelmreleasesversion) | `string` | Optional |
+| Property | Type | Required |
+|:---------------------------------------------------------------------------------|:----------|:---------|
+| [chart](#specpluginshelmreleaseschart) | `string` | Required |
+| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional |
+| [name](#specpluginshelmreleasesname) | `string` | Required |
+| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required |
+| [set](#specpluginshelmreleasesset) | `array` | Optional |
+| [values](#specpluginshelmreleasesvalues) | `array` | Optional |
+| [version](#specpluginshelmreleasesversion) | `string` | Optional |
## .spec.plugins.helm.releases.chart
@@ -5165,6 +5267,12 @@ The subnet CIDR to use for the Services network.
The chart of the release
+## .spec.plugins.helm.releases.disableValidationOnInstall
+
+### Description
+
+Disable running `helm diff` validation when installing the plugin; validation will still be performed when upgrading.
+
## .spec.plugins.helm.releases.name
### Description
diff --git a/go.mod b/go.mod
index 121af67c6..8fbb402a3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,11 +1,12 @@
module github.com/sighupio/fury-distribution
-go 1.21
+go 1.23
require (
github.com/Al-Pragliola/go-version v1.6.2
github.com/go-playground/validator/v10 v10.15.5
- golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+ github.com/sighupio/go-jsonschema v0.15.3
+ golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc
)
require (
@@ -15,6 +16,7 @@ require (
github.com/leodido/go-urn v1.2.4 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/net v0.17.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
+ golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 6e7eb35e8..905210baa 100644
--- a/go.sum
+++ b/go.sum
@@ -17,6 +17,8 @@ github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sighupio/go-jsonschema v0.15.3 h1:q2EtYBbXFRQbRbc9/lkFyg2lmxrJFaa8737dvwm/0bo=
+github.com/sighupio/go-jsonschema v0.15.3/go.mod h1:QOHAu5BGlMReCwWJx1Yf7FK+Z5D8TrVVT+SOgInHd5I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -26,14 +28,15 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/kfd.yaml b/kfd.yaml
index 576f2bd1c..af7ef8d4b 100644
--- a/kfd.yaml
+++ b/kfd.yaml
@@ -2,24 +2,24 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-version: v1.29.4
+version: v1.30.0
modules:
- auth: v0.3.0
- aws: v4.2.1
- dr: v2.3.0
- ingress: v2.3.3
- logging: v3.4.1
- monitoring: v3.2.0
- opa: v1.12.0
- networking: v1.17.0
- tracing: v1.0.3
+ auth: v0.4.0
+ aws: v4.3.0
+ dr: v3.0.0
+ ingress: v3.0.1
+ logging: v4.0.0
+ monitoring: v3.3.0
+ opa: v1.13.0
+ networking: v2.0.0
+ tracing: v1.1.0
kubernetes:
eks:
- version: 1.29
- installer: v3.1.2
+ version: 1.30
+ installer: v3.2.0
onpremises:
- version: 1.29.3
- installer: v1.29.3-rev.2
+ version: 1.30.6
+ installer: v1.30.6
furyctlSchemas:
eks:
- apiVersion: kfd.sighup.io/v1alpha2
@@ -35,7 +35,7 @@ tools:
furyagent:
version: 0.4.0
kubectl:
- version: 1.29.3
+ version: 1.30.6
kustomize:
version: 3.10.0
terraform:
diff --git a/kustomization.yaml b/kustomization.yaml
deleted file mode 100644
index efe55acd5..000000000
--- a/kustomization.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-
-# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is.
-
-resources:
- # Networking
- - ./vendor/katalog/networking/calico
- # OPA
- - ./vendor/katalog/opa/gatekeeper/core
- - ./vendor/katalog/opa/gatekeeper/rules/templates
- - ./vendor/katalog/opa/gatekeeper/rules/config
- - ./vendor/katalog/opa/gatekeeper/gpm
- # Monitoring
- - ./vendor/katalog/monitoring/prometheus-operator
- - ./vendor/katalog/monitoring/prometheus-operated
- - ./vendor/katalog/monitoring/grafana
- - ./vendor/katalog/monitoring/kubeadm-sm
- - ./vendor/katalog/monitoring/kube-proxy-metrics
- - ./vendor/katalog/monitoring/kube-state-metrics
- - ./vendor/katalog/monitoring/node-exporter
- - ./vendor/katalog/monitoring/prometheus-adapter
- - ./vendor/katalog/monitoring/alertmanager-operated
- # Logging
- - ./vendor/katalog/logging/opensearch-single
- - ./vendor/katalog/logging/opensearch-dashboards
- - ./vendor/katalog/logging/logging-operator
- - ./vendor/katalog/logging/logging-operated
- - ./vendor/katalog/logging/minio-ha
- - ./vendor/katalog/logging/loki-distributed
- - ./vendor/katalog/logging/configs
- # Ingress
- - ./vendor/katalog/ingress/cert-manager
- - ./vendor/katalog/ingress/nginx
- - ./vendor/katalog/ingress/forecastle
- # DR
- - ./vendor/katalog/dr/velero/velero-on-prem
- - ./vendor/katalog/dr/velero/velero-schedules
- - ./vendor/katalog/dr/velero/velero-node-agent
diff --git a/pkg/apis/config/model.go b/pkg/apis/config/model.go
index 52a55f81c..d48d1e1b8 100644
--- a/pkg/apis/config/model.go
+++ b/pkg/apis/config/model.go
@@ -69,6 +69,7 @@ type KFDToolsCommon struct {
Kustomize KFDTool `yaml:"kustomize" validate:"required"`
Terraform KFDTool `yaml:"terraform" validate:"required"`
Yq KFDTool `yaml:"yq" validate:"required"`
+ Kapp KFDTool `yaml:"kapp"`
Helm KFDTool `yaml:"helm"`
Helmfile KFDTool `yaml:"helmfile"`
}
diff --git a/pkg/apis/config/validation_test.go b/pkg/apis/config/validation_test.go
index 8189ff0ab..bac977fef 100644
--- a/pkg/apis/config/validation_test.go
+++ b/pkg/apis/config/validation_test.go
@@ -32,8 +32,6 @@ func TestValidateAwsRegion(t *testing.T) {
},
}
for _, tC := range testCases {
- tC := tC
-
t.Run(tC.desc, func(t *testing.T) {
t.Parallel()
diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go
index 08b1c1a25..a96dea2a0 100644
--- a/pkg/apis/ekscluster/v1alpha2/private/schema.go
+++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go
@@ -6,97 +6,255 @@ import (
"encoding/json"
"fmt"
"reflect"
+
+ "github.com/sighupio/go-jsonschema/pkg/types"
)
-// A Fury Cluster deployed through AWS's Elastic Kubernetes Service
-type EksclusterKfdV1Alpha2 struct {
- // ApiVersion corresponds to the JSON schema field "apiVersion".
- ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
+ }
+ *j = SpecDistributionModulesMonitoringType(v)
+ return nil
+}
- // Kind corresponds to the JSON schema field "kind".
- Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesLoggingType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
+ }
+ *j = SpecDistributionModulesLoggingType(v)
+ return nil
+}
- // Metadata corresponds to the JSON schema field "metadata".
- Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"`
+type TypesKubeNodeSelector map[string]string
- // Spec corresponds to the JSON schema field "spec".
- Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"`
+type SpecDistributionCommonProvider struct {
+ // The provider type. Don't set. FOR INTERNAL USE ONLY.
+ Type string `json:"type" yaml:"type" mapstructure:"type"`
}
-type EksclusterKfdV1Alpha2Kind string
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionCommonProvider: required")
+ }
+ type Plain SpecDistributionCommonProvider
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionCommonProvider(plain)
+ return nil
+}
-const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster"
+type TypesKubeTolerationEffect string
-type Metadata struct {
- // Name corresponds to the JSON schema field "name".
- Name string `json:"name" yaml:"name" mapstructure:"name"`
+var enumValues_TypesKubeTolerationEffect = []interface{}{
+ "NoSchedule",
+ "PreferNoSchedule",
+ "NoExecute",
}
-type Spec struct {
- // Distribution corresponds to the JSON schema field "distribution".
- Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_TypesKubeTolerationEffect {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v)
+ }
+ *j = TypesKubeTolerationEffect(v)
+ return nil
+}
- // DistributionVersion corresponds to the JSON schema field "distributionVersion".
- DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`
+const (
+ TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule"
+ TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule"
+ TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute"
+)
- // Infrastructure corresponds to the JSON schema field "infrastructure".
- Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"`
+type TypesKubeTolerationOperator string
- // Kubernetes corresponds to the JSON schema field "kubernetes".
- Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"`
+var enumValues_TypesKubeTolerationOperator = []interface{}{
+ "Exists",
+ "Equal",
+}
- // Plugins corresponds to the JSON schema field "plugins".
- Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_TypesKubeTolerationOperator {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v)
+ }
+ *j = TypesKubeTolerationOperator(v)
+ return nil
+}
- // Region corresponds to the JSON schema field "region".
- Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
+const (
+ TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists"
+ TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal"
+)
- // This map defines which will be the common tags that will be added to all the
- // resources created on AWS.
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
+type TypesKubeToleration struct {
+ // Effect corresponds to the JSON schema field "effect".
+ Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"`
- // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration".
- ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"`
-}
+ // The key of the toleration
+ Key string `json:"key" yaml:"key" mapstructure:"key"`
-type SpecDistribution struct {
- // Common corresponds to the JSON schema field "common".
- Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"`
+ // Operator corresponds to the JSON schema field "operator".
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
- // CustomPatches corresponds to the JSON schema field "customPatches".
- CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"`
+ // The value of the toleration
+ Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"`
+}
- // Modules corresponds to the JSON schema field "modules".
- Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["effect"]; !ok || v == nil {
+ return fmt.Errorf("field effect in TypesKubeToleration: required")
+ }
+ if v, ok := raw["key"]; !ok || v == nil {
+ return fmt.Errorf("field key in TypesKubeToleration: required")
+ }
+ type Plain TypesKubeToleration
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = TypesKubeToleration(plain)
+ return nil
}
+// Common configuration for all the distribution modules.
type SpecDistributionCommon struct {
- // The node selector to use to place the pods for all the KFD modules
+ // The node selector to use to place the pods for all the KFD modules. Follows
+ // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
// Provider corresponds to the JSON schema field "provider".
Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"`
// URL of the registry where to pull images from for the Distribution phase.
- // (Default is registry.sighup.io/fury).
+ // (Default is `registry.sighup.io/fury`).
//
// NOTE: If plugins are pulling from the default registry, the registry will be
- // replaced for these plugins too.
+ // replaced for the plugins too.
Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
- // The relative path to the vendor directory, does not need to be changed
+ // The relative path to the vendor directory, does not need to be changed.
RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"`
- // The tolerations that will be added to the pods for all the KFD modules
+ // An array with the tolerations that will be added to the pods for all the KFD
+ // modules. Follows Kubernetes tolerations format. Example:
+ //
+ // ```yaml
+ // - effect: NoSchedule
+ // key: node.kubernetes.io/role
+ // value: infra
+ // ```
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
-type SpecDistributionCommonProvider struct {
- // The type of the provider, must be EKS if specified
- Type string `json:"type" yaml:"type" mapstructure:"type"`
+type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string
+
+var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{
+ "create",
+ "replace",
+ "merge",
}
-type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v)
+ }
+ *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v)
+ return nil
+}
+
+const (
+ SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create"
+ SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace"
+ SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge"
+)
+
+type TypesKubeLabels map[string]string
+
+type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct {
+ // The annotations of the configmap
+ Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"`
+
+ // If true, the name suffix hash will be disabled
+ DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"`
+
+ // If true, the configmap will be immutable
+ Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"`
+
+ // The labels of the configmap
+ Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
+}
type SpecDistributionCustomPatchesConfigMapGeneratorResource struct {
// The behavior of the configmap
@@ -121,45 +279,29 @@ type SpecDistributionCustomPatchesConfigMapGeneratorResource struct {
Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"`
}
-type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
+ }
+ type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
+ return nil
+}
-const (
- SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create"
- SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge"
- SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace"
-)
-
-type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct {
- // The annotations of the configmap
- Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"`
-
- // If true, the name suffix hash will be disabled
- DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"`
-
- // If true, the configmap will be immutable
- Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"`
-
- // The labels of the configmap
- Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
-}
+type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource
// Each entry should follow the format of Kustomize's images patch
type SpecDistributionCustomPatchesImages []map[string]interface{}
-type SpecDistributionCustomPatchesPatch struct {
- // Options corresponds to the JSON schema field "options".
- Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"`
-
- // The patch content
- Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"`
-
- // The path of the patch
- Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"`
-
- // Target corresponds to the JSON schema field "target".
- Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"`
-}
-
type SpecDistributionCustomPatchesPatchOptions struct {
// If true, the kind change will be allowed
AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"`
@@ -191,13 +333,73 @@ type SpecDistributionCustomPatchesPatchTarget struct {
Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"`
}
+type SpecDistributionCustomPatchesPatch struct {
+ // Options corresponds to the JSON schema field "options".
+ Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"`
+
+ // The patch content
+ Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"`
+
+ // The path of the patch
+ Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"`
+
+ // Target corresponds to the JSON schema field "target".
+ Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"`
+}
+
type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch
// Each entry should be either a relative file path or an inline content resolving
// to a partial or complete resource definition
type SpecDistributionCustomPatchesPatchesStrategicMerge []string
-type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource
+type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string
+
+var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
+ "create",
+ "replace",
+ "merge",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
+ }
+ *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
+ return nil
+}
+
+const (
+ SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create"
+ SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace"
+ SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge"
+)
+
+type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct {
+ // The annotations of the secret
+ Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"`
+
+ // If true, the name suffix hash will be disabled
+ DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"`
+
+ // If true, the secret will be immutable
+ Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"`
+
+ // The labels of the secret
+ Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
+}
type SpecDistributionCustomPatchesSecretGeneratorResource struct {
// The behavior of the secret
@@ -225,28 +427,26 @@ type SpecDistributionCustomPatchesSecretGeneratorResource struct {
Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}
-type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string
-
-const (
- SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create"
- SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge"
- SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace"
-)
-
-type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct {
- // The annotations of the secret
- Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"`
-
- // If true, the name suffix hash will be disabled
- DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"`
-
- // If true, the secret will be immutable
- Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"`
-
- // The labels of the secret
- Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
+ }
+ type Plain SpecDistributionCustomPatchesSecretGeneratorResource
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
+ return nil
}
+type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource
+
type SpecDistributionCustompatches struct {
// ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator".
ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"`
@@ -265,57 +465,41 @@ type SpecDistributionCustompatches struct {
SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"`
}
-type SpecDistributionModules struct {
- // Auth corresponds to the JSON schema field "auth".
- Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"`
-
- // Aws corresponds to the JSON schema field "aws".
- Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"`
-
- // Dr corresponds to the JSON schema field "dr".
- Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"`
-
- // Ingress corresponds to the JSON schema field "ingress".
- Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"`
-
- // Logging corresponds to the JSON schema field "logging".
- Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"`
-
- // Monitoring corresponds to the JSON schema field "monitoring".
- Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"`
-
- // Networking corresponds to the JSON schema field "networking".
- Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"`
-
- // Policy corresponds to the JSON schema field "policy".
- Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"`
+type SpecDistributionModulesAuthDexExpiry struct {
+ // Dex ID tokens expiration time duration (default 24h).
+ IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"`
- // Tracing corresponds to the JSON schema field "tracing".
- Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
+ // Dex signing key expiration time duration (default 6h).
+ SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
}
-type SpecDistributionModulesAuth struct {
- // The base domain for the auth module
- BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
-
- // Dex corresponds to the JSON schema field "dex".
- Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Pomerium corresponds to the JSON schema field "pomerium".
- Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"`
+type TypesFuryModuleComponentOverrides struct {
+ // Set to override the node selector used to place the pods of the package.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // Provider corresponds to the JSON schema field "provider".
- Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
+ // Set to override the tolerations that will be added to the pods of the package.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
+// Configuration for the Dex package.
type SpecDistributionModulesAuthDex struct {
- // The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+ // included with the distribution in Dex's configuration. Example:
+ //
+ // ```yaml
+ // additionalStaticClients:
+ // - id: my-custom-client
+ // name: "A custom additional static client"
+ // redirectURIs:
+ // - "https://myapp.tld/redirect"
+ // - "https://alias.tld/oidc-callback"
+ // secret: supersecretpassword
+ // ```
+ // Reference: https://dexidp.io/docs/connectors/local/
AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
- // The connectors for dex
+ // A list with each item defining a Dex connector. Follows Dex connectors
+ // configuration format: https://dexidp.io/docs/connectors/
Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
// Expiry corresponds to the JSON schema field "expiry".
@@ -325,194 +509,222 @@ type SpecDistributionModulesAuthDex struct {
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
-type SpecDistributionModulesAuthDexExpiry struct {
- // Dex ID tokens expiration time duration (default 24h).
- IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"`
-
- // Dex signing key expiration time duration (default 6h).
- SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
-}
-
-type SpecDistributionModulesAuthOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
- Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
-
- // The node selector to use to place the pods for the auth module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
-
- // The tolerations that will be added to the pods for the auth module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["connectors"]; !ok || v == nil {
+ return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
+ }
+ type Plain SpecDistributionModulesAuthDex
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthDex(plain)
+ return nil
}
type SpecDistributionModulesAuthOverridesIngress struct {
- // The host of the ingress
+ // Use this host for the ingress instead of the default one.
Host string `json:"host" yaml:"host" mapstructure:"host"`
- // The ingress class of the ingress
+ // Use this ingress class for the ingress instead of the default one.
IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"`
}
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["host"]; !ok || v == nil {
+ return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
+ }
+ if v, ok := raw["ingressClass"]; !ok || v == nil {
+ return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+ }
+ type Plain SpecDistributionModulesAuthOverridesIngress
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthOverridesIngress(plain)
+ return nil
+}
+
+// Override the definition of the Auth module ingresses.
type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress
-type SpecDistributionModulesAuthPomerium interface{}
+// Override the common configuration with a particular configuration for the Auth
+// module.
+type SpecDistributionModulesAuthOverrides struct {
+ // Override the definition of the Auth module ingresses.
+ Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
-// override default routes for KFD components
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct {
- // GatekeeperPolicyManager corresponds to the JSON schema field
- // "gatekeeperPolicyManager".
- GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"`
+ // Set to override the node selector used to place the pods of the Auth module.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // HubbleUi corresponds to the JSON schema field "hubbleUi".
- HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"`
+ // Set to override the tolerations that will be added to the pods of the Auth
+ // module.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
- // IngressNgnixForecastle corresponds to the JSON schema field
- // "ingressNgnixForecastle".
- IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"`
+type SpecDistributionModulesAuthPomerium interface{}
- // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole".
- LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"`
+// Configuration for the HTTP Basic Auth provider.
+type SpecDistributionModulesAuthProviderBasicAuth struct {
+ // The password for logging in with the HTTP basic authentication.
+ Password string `json:"password" yaml:"password" mapstructure:"password"`
- // LoggingOpensearchDashboards corresponds to the JSON schema field
- // "loggingOpensearchDashboards".
- LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"`
-
- // MonitoringAlertmanager corresponds to the JSON schema field
- // "monitoringAlertmanager".
- MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"`
-
- // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana".
- MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"`
-
- // MonitoringMinioConsole corresponds to the JSON schema field
- // "monitoringMinioConsole".
- MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"`
-
- // MonitoringPrometheus corresponds to the JSON schema field
- // "monitoringPrometheus".
- MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"`
-
- // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole".
- TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"`
+ // The username for logging in with the HTTP basic authentication.
+ Username string `json:"username" yaml:"username" mapstructure:"username"`
}
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{}
-
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{}
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["password"]; !ok || v == nil {
+ return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
+ }
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ }
+ type Plain SpecDistributionModulesAuthProviderBasicAuth
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+ return nil
+}
-type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{}
+type SpecDistributionModulesAuthProviderType string
-type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{}
+var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
+ "none",
+ "basicAuth",
+ "sso",
+}
-// Pomerium needs some user-provided secrets to be fully configured. These secrets
-// should be unique between clusters.
-type SpecDistributionModulesAuthPomeriumSecrets struct {
- // Cookie Secret is the secret used to encrypt and sign session cookies.
- //
- // To generate a random key, run the following command: `head -c32 /dev/urandom |
- // base64`
- COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
+ }
+ *j = SpecDistributionModulesAuthProviderType(v)
+ return nil
+}
- // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth
- // type is SSO, this value will be the secret used to authenticate Pomerium with
- // Dex, **use a strong random value**.
- IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"`
+const (
+ SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none"
+ SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth"
+ SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso"
+)
- // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate
- // requests between Pomerium services. It's critical that secret keys are random,
- // and stored safely.
- //
- // To generate a key, run the following command: `head -c32 /dev/urandom | base64`
- SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"`
+type SpecDistributionModulesAuthProvider struct {
+ // BasicAuth corresponds to the JSON schema field "basicAuth".
+ BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"`
- // Signing Key is the base64 representation of one or more PEM-encoded private
- // keys used to sign a user's attestation JWT, which can be consumed by upstream
- // applications to pass along identifying user information like username, id, and
- // groups.
- //
- // To generates an P-256 (ES256) signing key:
+ // The type of the Auth provider, options are:
+ // - `none`: will disable authentication in the infrastructural ingresses.
+ // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO)
+ // and require authentication before accessing them.
+ // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth
+ // (username and password) authentication.
//
- // ```bash
- // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem
- // # careful! this will output your private key in terminal
- // cat ec_private.pem | base64
- // ```
- SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"`
+ // Default is `none`.
+ Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"`
}
-// Configuration for Pomerium, an identity-aware reverse proxy used for SSO.
-type SpecDistributionModulesAuthPomerium_2 struct {
- // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy".
- DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
+ }
+ type Plain SpecDistributionModulesAuthProvider
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthProvider(plain)
+ return nil
+}
- // DEPRECATED: Use defaultRoutesPolicy and/or routes
- Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"`
+// Configuration for the Auth module.
+type SpecDistributionModulesAuth struct {
+ // The base domain for the ingresses created by the Auth module (Gangplank,
+ // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+ // use the `external` ingress class.
+ BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
- // Additional routes configuration for Pomerium. Follows Pomerium's route format:
- // https://www.pomerium.com/docs/reference/routes
- Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"`
+ // Dex corresponds to the JSON schema field "dex".
+ Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"`
- // Secrets corresponds to the JSON schema field "secrets".
- Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"`
-}
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-type SpecDistributionModulesAuthProvider struct {
- // BasicAuth corresponds to the JSON schema field "basicAuth".
- BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"`
+ // Pomerium corresponds to the JSON schema field "pomerium".
+ Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"`
- // The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
- Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"`
+ // Provider corresponds to the JSON schema field "provider".
+ Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
}
-type SpecDistributionModulesAuthProviderBasicAuth struct {
- // The password for the basic auth
- Password string `json:"password" yaml:"password" mapstructure:"password"`
-
- // The username for the basic auth
- Username string `json:"username" yaml:"username" mapstructure:"username"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+ }
+ type Plain SpecDistributionModulesAuth
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuth(plain)
+ return nil
}
-type SpecDistributionModulesAuthProviderType string
-
-const (
- SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth"
- SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none"
- SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso"
-)
-
-type SpecDistributionModulesAws struct {
- // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler".
- ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"`
+type TypesAwsArn string
- // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver".
- EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"`
+type TypesAwsIamRoleName string
- // EbsSnapshotController corresponds to the JSON schema field
- // "ebsSnapshotController".
- EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"`
+type TypesFuryModuleComponentOverridesWithIAMRoleName struct {
+ // IamRoleName corresponds to the JSON schema field "iamRoleName".
+ IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"`
- // LoadBalancerController corresponds to the JSON schema field
- // "loadBalancerController".
- LoadBalancerController SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController" yaml:"loadBalancerController" mapstructure:"loadBalancerController"`
+	// Set to override the node selector used to place the pods of the package.
+	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"`
+	// Set to override the tolerations that will be added to the pods of the package.
+	Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
type SpecDistributionModulesAwsClusterAutoscaler struct {
@@ -523,6 +735,24 @@ type SpecDistributionModulesAwsClusterAutoscaler struct {
Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["iamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required")
+ }
+ type Plain SpecDistributionModulesAwsClusterAutoscaler
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAwsClusterAutoscaler(plain)
+ return nil
+}
+
type SpecDistributionModulesAwsEbsCsiDriver struct {
// IamRoleArn corresponds to the JSON schema field "iamRoleArn".
IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"`
@@ -531,6 +761,24 @@ type SpecDistributionModulesAwsEbsCsiDriver struct {
Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["iamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required")
+ }
+ type Plain SpecDistributionModulesAwsEbsCsiDriver
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAwsEbsCsiDriver(plain)
+ return nil
+}
+
type SpecDistributionModulesAwsEbsSnapshotController struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -544,54 +792,270 @@ type SpecDistributionModulesAwsLoadBalancerController struct {
Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
-type SpecDistributionModulesDr struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["iamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required")
+ }
+ type Plain SpecDistributionModulesAwsLoadBalancerController
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAwsLoadBalancerController(plain)
+ return nil
+}
- // The type of the DR, must be ***none*** or ***eks***
- Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"`
+type TypesFuryModuleOverridesIngress struct {
+ // If true, the ingress will not have authentication even if
+ // `.spec.modules.auth.provider.type` is SSO or Basic Auth.
+ DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
- // Velero corresponds to the JSON schema field "velero".
- Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"`
+ // Use this host for the ingress instead of the default one.
+ Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+
+ // Use this ingress class for the ingress instead of the default one.
+ IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
+}
+
+type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
+
+// Override the common configuration with a particular configuration for the
+// module.
+type TypesFuryModuleOverrides struct {
+ // Ingresses corresponds to the JSON schema field "ingresses".
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+
+ // Set to override the node selector used to place the pods of the module.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+ // Set to override the tolerations that will be added to the pods of the module.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
+type SpecDistributionModulesAws struct {
+ // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler".
+ ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"`
+
+ // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver".
+ EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"`
+
+ // EbsSnapshotController corresponds to the JSON schema field
+ // "ebsSnapshotController".
+ EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"`
+
+ // LoadBalancerController corresponds to the JSON schema field
+ // "loadBalancerController".
+ LoadBalancerController SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController" yaml:"loadBalancerController" mapstructure:"loadBalancerController"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["clusterAutoscaler"]; !ok || v == nil {
+ return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required")
+ }
+ if v, ok := raw["ebsCsiDriver"]; !ok || v == nil {
+ return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required")
+ }
+ if v, ok := raw["loadBalancerController"]; !ok || v == nil {
+ return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required")
+ }
+ if v, ok := raw["overrides"]; !ok || v == nil {
+ return fmt.Errorf("field overrides in SpecDistributionModulesAws: required")
+ }
+ type Plain SpecDistributionModulesAws
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAws(plain)
+ return nil
}
type SpecDistributionModulesDrType string
+var enumValues_SpecDistributionModulesDrType = []interface{}{
+ "none",
+ "eks",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesDrType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+ }
+ *j = SpecDistributionModulesDrType(v)
+ return nil
+}
+
const (
- SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks"
SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none"
+ SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks"
)
-type SpecDistributionModulesDrVelero struct {
- // Eks corresponds to the JSON schema field "eks".
- Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"`
+type TypesAwsS3BucketName string
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+type TypesAwsRegion string
- // Configuration for Velero's backup schedules.
- Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+var enumValues_TypesAwsRegion = []interface{}{
+ "af-south-1",
+ "ap-east-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-south-1",
+ "ap-south-2",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ap-southeast-3",
+ "ap-southeast-4",
+ "ca-central-1",
+ "eu-central-1",
+ "eu-central-2",
+ "eu-north-1",
+ "eu-south-1",
+ "eu-south-2",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "me-central-1",
+ "me-south-1",
+ "sa-east-1",
+ "us-east-1",
+ "us-east-2",
+ "us-gov-east-1",
+ "us-gov-west-1",
+ "us-west-1",
+ "us-west-2",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_TypesAwsRegion {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v)
+ }
+ *j = TypesAwsRegion(v)
+ return nil
+}
+
+const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1"
+
+type Metadata struct {
+ // The name of the cluster. It will also be used as a prefix for all the other
+ // resources created.
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
}
+const (
+ TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1"
+ TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2"
+ TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3"
+ TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1"
+ TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2"
+ TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1"
+ TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2"
+ TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3"
+ TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4"
+ TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1"
+ TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1"
+ TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2"
+ TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1"
+ TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1"
+ TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2"
+ TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1"
+ TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2"
+ TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3"
+ TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1"
+ TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1"
+ TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1"
+ TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1"
+ TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2"
+ TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1"
+ TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1"
+ TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1"
+ TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2"
+)
+
type SpecDistributionModulesDrVeleroEks struct {
- // The name of the velero bucket
+ // The name of the bucket for Velero.
BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"`
// IamRoleArn corresponds to the JSON schema field "iamRoleArn".
IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"`
- // The region where the velero bucket is located
+ // The region where the bucket for Velero will be located.
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
}
-// Configuration for Velero's backup schedules.
-type SpecDistributionModulesDrVeleroSchedules struct {
- // Configuration for Velero's schedules cron.
- Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["bucketName"]; !ok || v == nil {
+ return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required")
+ }
+ if v, ok := raw["iamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required")
+ }
+ if v, ok := raw["region"]; !ok || v == nil {
+ return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required")
+ }
+ type Plain SpecDistributionModulesDrVeleroEks
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesDrVeleroEks(plain)
+ return nil
+}
- // Whether to install or not the default `manifests` and `full` backups schedules.
- // Default is `true`.
- Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+// Configuration for Velero's full backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct {
+ // The cron expression for the `full` backup schedule (default `0 1 * * *`).
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // EXPERIMENTAL (if you do more than one backup, the following backups after the
+ // first are not automatically restorable, see
+ // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for
+ // the manual restore solution): SnapshotMoveData specifies whether snapshot data
+ // should be moved. Velero will create a new volume from the snapshot and upload
+ // the content to the storageLocation.
+ SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"`
// The Time To Live (TTL) of the backups created by the backup schedules (default
// `720h0m0s`, 30 days). Notice that changing this value will affect only newly
@@ -599,72 +1063,99 @@ type SpecDistributionModulesDrVeleroSchedules struct {
Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
}
-// Configuration for Velero's schedules cron.
-type SpecDistributionModulesDrVeleroSchedulesCron struct {
- // The cron expression for the `full` backup schedule (default `0 1 * * *`).
- Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
-
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct {
// The cron expression for the `manifests` backup schedule (default `*/15 * * *
// *`).
- Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
-}
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
-type SpecDistributionModulesIngress struct {
- // the base domain used for all the KFD ingresses, if in the nginx dual
- // configuration, it should be the same as the
- // .spec.distribution.modules.ingress.dns.private.name zone
- BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`
-
- // CertManager corresponds to the JSON schema field "certManager".
- CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"`
-
- // Dns corresponds to the JSON schema field "dns".
- Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"`
+ // The Time To Live (TTL) of the backups created by the backup schedules (default
+ // `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+ // created backups, prior backups will keep the old TTL.
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
- // ExternalDns corresponds to the JSON schema field "externalDns".
- ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"`
+// Configuration for Velero schedules.
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+ // Configuration for Velero's full backup schedule.
+ Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
- // Forecastle corresponds to the JSON schema field "forecastle".
- Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
+ // Configuration for Velero's manifests backup schedule.
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
- // Configurations for the nginx ingress controller module
- Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`
+// Configuration for Velero's backup schedules.
+type SpecDistributionModulesDrVeleroSchedules struct {
+ // Configuration for Velero schedules.
+ Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+ // Whether to install or not the default `manifests` and `full` backups schedules.
+ // Default is `true`.
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
}
-type SpecDistributionModulesIngressCertManager struct {
- // ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
- ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
+type SpecDistributionModulesDrVelero struct {
+ // Eks corresponds to the JSON schema field "eks".
+ Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"`
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
- // The email of the cluster issuer
- Email string `json:"email" yaml:"email" mapstructure:"email"`
+ // Configuration for Velero's backup schedules.
+ Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+}
- // The name of the cluster issuer
- Name string `json:"name" yaml:"name" mapstructure:"name"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["eks"]; !ok || v == nil {
+ return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required")
+ }
+ type Plain SpecDistributionModulesDrVelero
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesDrVelero(plain)
+ return nil
+}
- // Route53 corresponds to the JSON schema field "route53".
- Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"`
+// Configuration for the Disaster Recovery module.
+type SpecDistributionModulesDr struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The custom solvers configurations
- Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
+ // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the
+ // module and `eks` will install Velero and use an S3 bucket to store the
+ // backups.
+ //
+ // Default is `none`.
+ Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"`
- // The type of the cluster issuer, must be ***dns01*** or ***http01***
- Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
+ // Velero corresponds to the JSON schema field "velero".
+ Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"`
}
-type SpecDistributionModulesIngressCertManagerClusterIssuerType string
-
-const (
- SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01"
- SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01"
-)
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+ }
+ type Plain SpecDistributionModulesDr
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesDr(plain)
+ return nil
+}
type SpecDistributionModulesIngressClusterIssuerRoute53 struct {
// HostedZoneId corresponds to the JSON schema field "hostedZoneId".
@@ -677,2730 +1168,2603 @@ type SpecDistributionModulesIngressClusterIssuerRoute53 struct {
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
}
-type SpecDistributionModulesIngressDNS struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Private corresponds to the JSON schema field "private".
- Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"`
-
- // Public corresponds to the JSON schema field "public".
- Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["hostedZoneId"]; !ok || v == nil {
+ return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required")
+ }
+ if v, ok := raw["iamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required")
+ }
+ if v, ok := raw["region"]; !ok || v == nil {
+ return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required")
+ }
+ type Plain SpecDistributionModulesIngressClusterIssuerRoute53
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain)
+ return nil
}
-type SpecDistributionModulesIngressDNSPrivate struct {
- // If true, the private hosted zone will be created
- Create bool `json:"create" yaml:"create" mapstructure:"create"`
+type SpecDistributionModulesIngressCertManagerClusterIssuerType string
- // The name of the private hosted zone
- Name string `json:"name" yaml:"name" mapstructure:"name"`
+var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
+ "dns01",
+ "http01",
+}
- // VpcId corresponds to the JSON schema field "vpcId".
- VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
+ }
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
+ return nil
}
-type SpecDistributionModulesIngressDNSPublic struct {
- // If true, the public hosted zone will be created
- Create bool `json:"create" yaml:"create" mapstructure:"create"`
+const (
+ SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01"
+ SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01"
+)
+
+// Configuration for the cert-manager's ACME clusterIssuer used to request
+// certificates from Let's Encrypt.
+type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
+ // The email address to use during the certificate issuing process.
+ Email string `json:"email" yaml:"email" mapstructure:"email"`
- // The name of the public hosted zone
+ // The name of the clusterIssuer.
Name string `json:"name" yaml:"name" mapstructure:"name"`
-}
-type SpecDistributionModulesIngressExternalDNS struct {
- // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn".
- PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"`
+ // Route53 corresponds to the JSON schema field "route53".
+ Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"`
- // PublicIamRoleArn corresponds to the JSON schema field "publicIamRoleArn".
- PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"`
+ // The list of challenge solvers to use instead of the default one for the
+ // `http01` challenge. Check [cert manager's
+ // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+ // for examples for this field.
+ Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
+
+ // The type of the clusterIssuer, must be `dns01` for using DNS challenge or
+ // `http01` for using HTTP challenge.
+ Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}
-type SpecDistributionModulesIngressForecastle struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["email"]; !ok || v == nil {
+ return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ if v, ok := raw["route53"]; !ok || v == nil {
+ return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
+ return nil
}
-type SpecDistributionModulesIngressNginx struct {
+// Configuration for the cert-manager package. Required even if
+// `ingress.nginx.type` is `none`, cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
+type SpecDistributionModulesIngressCertManager struct {
+ // ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
+ ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
+
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Tls corresponds to the JSON schema field "tls".
- Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
-
- // The type of the nginx ingress controller, must be ***none***, ***single*** or
- // ***dual***
- Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}
-type SpecDistributionModulesIngressNginxTLS struct {
- // The provider of the TLS certificate, must be ***none***, ***certManager*** or
- // ***secret***
- Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
-
- // Secret corresponds to the JSON schema field "secret".
- Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["clusterIssuer"]; !ok || v == nil {
+ return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManager
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressCertManager(plain)
+ return nil
}
-type SpecDistributionModulesIngressNginxTLSProvider string
-
-const (
- SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager"
- SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none"
- SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
-)
-
-type SpecDistributionModulesIngressNginxTLSSecret struct {
- // Ca corresponds to the JSON schema field "ca".
- Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
+// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for
+// exposing infrastructural services only in the private DNS zone.
+type SpecDistributionModulesIngressDNSPrivate struct {
+ // By default, a Terraform data source will be used to get the private DNS zone.
+ // Set to `true` to create the private zone instead.
+ Create bool `json:"create" yaml:"create" mapstructure:"create"`
- // The certificate file content or you can use the file notation to get the
- // content from a file
- Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
+ // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`.
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
- // Key corresponds to the JSON schema field "key".
- Key string `json:"key" yaml:"key" mapstructure:"key"`
+ // VpcId corresponds to the JSON schema field "vpcId".
+ VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"`
}
-type SpecDistributionModulesIngressNginxType string
-
-const (
- SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual"
- SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none"
- SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
-)
-
-type SpecDistributionModulesIngressOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
- Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
-
- // The node selector to use to place the pods for the ingress module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
-
- // The tolerations that will be added to the pods for the ingress module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["create"]; !ok || v == nil {
+ return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required")
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required")
+ }
+ if v, ok := raw["vpcId"]; !ok || v == nil {
+ return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required")
+ }
+ type Plain SpecDistributionModulesIngressDNSPrivate
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressDNSPrivate(plain)
+ return nil
}
-type SpecDistributionModulesIngressOverridesIngresses struct {
- // Forecastle corresponds to the JSON schema field "forecastle".
- Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
+type SpecDistributionModulesIngressDNSPublic struct {
+ // By default, a Terraform data source will be used to get the public DNS zone.
+ // Set to `true` to create the public zone instead.
+ Create bool `json:"create" yaml:"create" mapstructure:"create"`
+
+ // The name of the public hosted zone.
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
}
-type SpecDistributionModulesLogging struct {
- // Cerebro corresponds to the JSON schema field "cerebro".
- Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
-
- // CustomOutputs corresponds to the JSON schema field "customOutputs".
- CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"`
-
- // Loki corresponds to the JSON schema field "loki".
- Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"`
-
- // Minio corresponds to the JSON schema field "minio".
- Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
-
- // Opensearch corresponds to the JSON schema field "opensearch".
- Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"`
-
- // Operator corresponds to the JSON schema field "operator".
- Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // selects the logging stack. Choosing none will disable the centralized logging.
- // Choosing opensearch will deploy and configure the Logging Operator and an
- // OpenSearch cluster (can be single or triple for HA) where the logs will be
- // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
- // for storage. Choosing customOuput the Logging Operator will be deployed and
- // installed but with no local storage, you will have to create the needed Outputs
- // and ClusterOutputs to ship the logs to your desired storage.
- Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesLoggingCerebro struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
-type SpecDistributionModulesLoggingCustomOutputs struct {
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- Events string `json:"events" yaml:"events" mapstructure:"events"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- Infra string `json:"infra" yaml:"infra" mapstructure:"infra"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"`
-
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
- SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"`
-}
-
-type SpecDistributionModulesLoggingLoki struct {
- // Backend corresponds to the JSON schema field "backend".
- Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
-
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
- ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-}
-
-type SpecDistributionModulesLoggingLokiBackend string
-
-const (
- SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint"
- SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio"
-)
-
-type SpecDistributionModulesLoggingLokiExternalEndpoint struct {
- // The access key id of the loki external endpoint
- AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
-
- // The bucket name of the loki external endpoint
- BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
-
- // The endpoint of the loki external endpoint
- Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
-
- // If true, the loki external endpoint will be insecure
- Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
-
- // The secret access key of the loki external endpoint
- SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
-}
-
-type SpecDistributionModulesLoggingMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The PVC size for each minio disk, 6 disks total
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesLoggingMinioRootUser struct {
- // The password of the minio root user
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username of the minio root user
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
-}
-
-type SpecDistributionModulesLoggingOpensearch struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-
- // The storage size for the opensearch pods
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-
- // The type of the opensearch, must be ***single*** or ***triple***
- Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesLoggingOpensearchType string
-
-const (
- SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single"
- SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple"
-)
-
-type SpecDistributionModulesLoggingOperator struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesLoggingType string
-
-const (
- SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs"
- SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki"
- SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none"
- SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch"
-)
-
-// configuration for the Monitoring module components
-type SpecDistributionModulesMonitoring struct {
- // Alertmanager corresponds to the JSON schema field "alertmanager".
- Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"`
-
- // BlackboxExporter corresponds to the JSON schema field "blackboxExporter".
- BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"`
-
- // Grafana corresponds to the JSON schema field "grafana".
- Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"`
-
- // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics".
- KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"`
-
- // Mimir corresponds to the JSON schema field "mimir".
- Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"`
-
- // Minio corresponds to the JSON schema field "minio".
- Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Prometheus corresponds to the JSON schema field "prometheus".
- Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
-
- // PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
- PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
-
- // The type of the monitoring, must be ***none***, ***prometheus***,
- // ***prometheusAgent*** or ***mimir***.
- //
- // - `none`: will disable the whole monitoring stack.
- // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
- // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
- // the components of the cluster, Grafana and a series of dashboards to view the
- // collected metrics, and more.
- // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus
- // in Agent mode (no alerting, no queries, no storage), and all the exporters
- // needed to get metrics for the status of the cluster and the workloads. Useful
- // when having a centralized (remote) Prometheus where to ship the metrics and not
- // storing them locally in the cluster.
- // - `mimir`: will install the same as the `prometheus` option, and in addition
- // Grafana Mimir that allows for longer retention of metrics and the usage of
- // Object Storage.
- Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
-
- // X509Exporter corresponds to the JSON schema field "x509Exporter".
- X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringAlertManager struct {
- // The webhook url to send deadman switch monitoring, for example to use with
- // healthchecks.io
- DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
-
- // If true, the default rules will be installed
- InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
-
- // The slack webhook url to send alerts
- SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringBlackboxExporter struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringGrafana struct {
- // Setting this to true will deploy an additional `grafana-basic-auth` ingress
- // protected with Grafana's basic auth instead of SSO. It's intended use is as a
- // temporary ingress for when there are problems with the SSO login flow.
- //
- // Notice that by default anonymous access is enabled.
- BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
- // role. Example:
- //
- // ```yaml
- // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
- // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
- // 'Viewer'
- // ```
- //
- // More details in [Grafana's
- // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
- UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringKubeStateMetrics struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringMimir struct {
- // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
- Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
-
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
- ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The retention time for the mimir pods
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringMimirBackend string
-
-const (
- SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint"
- SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
-)
-
-type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
- // The access key id of the external mimir backend
- AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
-
- // The bucket name of the external mimir backend
- BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
-
- // The endpoint of the external mimir backend
- Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
-
- // If true, the external mimir backend will not use tls
- Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
-
- // The secret access key of the external mimir backend
- SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The storage size for the minio pods
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringMinioRootUser struct {
- // The password for the minio root user
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username for the minio root user
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringPrometheus struct {
- // Set this option to ship the collected metrics to a remote Prometheus receiver.
- //
- // `remoteWrite` is an array of objects that allows configuring the
- // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
- // Prometheus. The objects in the array follow [the same schema as in the
- // prometheus
- // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-
- // The retention size for the k8s Prometheus instance.
- RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
-
- // The retention time for the k8s Prometheus instance.
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-
- // The storage size for the k8s Prometheus instance.
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringPrometheusAgent struct {
- // Set this option to ship the collected metrics to a remote Prometheus receiver.
- //
- // `remoteWrite` is an array of objects that allows configuring the
- // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
- // Prometheus. The objects in the array follow [the same schema as in the
- // prometheus
- // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
-
-type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
-
-type SpecDistributionModulesMonitoringType string
-
-const (
- SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir"
- SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
- SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus"
- SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
-)
-
-type SpecDistributionModulesMonitoringX509Exporter struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesNetworking struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // TigeraOperator corresponds to the JSON schema field "tigeraOperator".
- TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
-
- // Type corresponds to the JSON schema field "type".
- Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
-}
-
-type SpecDistributionModulesNetworkingTigeraOperator struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesNetworkingType string
-
-const SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none"
-
-type SpecDistributionModulesPolicy struct {
- // Gatekeeper corresponds to the JSON schema field "gatekeeper".
- Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
-
- // Kyverno corresponds to the JSON schema field "kyverno".
- Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The type of security to use, either ***none***, ***gatekeeper*** or
- // ***kyverno***
- Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesPolicyGatekeeper struct {
- // This parameter adds namespaces to Gatekeeper's exemption list, so it will not
- // enforce the constraints on them.
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-
- // The enforcement action to use for the gatekeeper module
- EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
-
- // If true, the default policies will be installed
- InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
-
-const (
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
-)
-
-type SpecDistributionModulesPolicyKyverno struct {
- // This parameter adds namespaces to Kyverno's exemption list, so it will not
- // enforce the constraints on them.
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-
- // If true, the default policies will be installed
- InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The validation failure action to use for the kyverno module
- ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
-}
-
-type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
-
-const (
- SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
- SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
-)
-
-type SpecDistributionModulesPolicyType string
-
-const (
- SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
- SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno"
- SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
-)
-
-type SpecDistributionModulesTracing struct {
- // Minio corresponds to the JSON schema field "minio".
- Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Tempo corresponds to the JSON schema field "tempo".
- Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
-
- // The type of tracing to use, either ***none*** or ***tempo***
- Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesTracingMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The storage size for the minio pods
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesTracingMinioRootUser struct {
- // The password for the minio root user
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username for the minio root user
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
-}
-
-type SpecDistributionModulesTracingTempo struct {
- // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
- Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
-
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
- ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The retention time for the tempo pods
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-}
-
-type SpecDistributionModulesTracingTempoBackend string
-
-const (
- SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
- SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
-)
-
-type SpecDistributionModulesTracingTempoExternalEndpoint struct {
- // The access key id of the external tempo backend
- AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
-
- // The bucket name of the external tempo backend
- BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
-
- // The endpoint of the external tempo backend
- Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
-
- // If true, the external tempo backend will not use tls
- Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
-
- // The secret access key of the external tempo backend
- SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
-}
-
-type SpecDistributionModulesTracingType string
-
-const (
- SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none"
- SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
-)
-
-type SpecInfrastructure struct {
- // This key defines the VPC that will be created in AWS
- Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`
-
- // This section defines the creation of VPN bastions
- Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
-}
-
-type SpecInfrastructureVpc struct {
- // Network corresponds to the JSON schema field "network".
- Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"`
-}
-
-type SpecInfrastructureVpcNetwork struct {
- // This is the CIDR of the VPC that will be created
- Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"`
-
- // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs".
- SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"`
-}
-
-type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
- // These are the CIRDs for the private subnets, where the nodes, the pods, and the
- // private load balancers will be created
- Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`
-
- // These are the CIDRs for the public subnets, where the public load balancers and
- // the VPN servers will be created
- Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
-}
-
-type SpecInfrastructureVpn struct {
- // This value defines the prefix that will be used to create the bucket name where
- // the VPN servers will store the states
- BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`
-
- // The dhParamsBits size used for the creation of the .pem file that will be used
- // in the dh openvpn server.conf file
- DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`
-
- // The size of the disk in GB
- DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`
-
- // Overrides the default IAM user name for the VPN
- IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
-
- // The size of the AWS EC2 instance
- InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
-
- // The number of instances to create, 0 to skip the creation
- Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
-
- // The username of the account to create in the bastion's operating system
- OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
-
- // The port used by the OpenVPN server
- Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
-
- // Ssh corresponds to the JSON schema field "ssh".
- Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
-
- // The VPC ID where the VPN servers will be created, required only if
- // .spec.infrastructure.vpc is omitted
- VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
-
- // The CIDR that will be used to assign IP addresses to the VPN clients when
- // connected
- VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
-}
-
-type SpecInfrastructureVpnSsh struct {
- // The CIDR enabled in the security group that can access the bastions in SSH
- AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`
-
- // The github user name list that will be used to get the ssh public key that will
- // be added as authorized key to the operatorName user
- GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`
-
- // This value defines the public keys that will be added to the bastion's
- // operating system NOTES: Not yet implemented
- PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
-}
-
-type SpecKubernetes struct {
- // ApiServer corresponds to the JSON schema field "apiServer".
- ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"`
-
- // AwsAuth corresponds to the JSON schema field "awsAuth".
- AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"`
-
- // Overrides the default IAM role name prefix for the EKS cluster
- ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`
-
- // Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
- LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
-
- // Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
- LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`
-
- // This key contains the ssh public key that can connect to the nodes via SSH
- // using the ec2-user user
- NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`
-
- // NodePools corresponds to the JSON schema field "nodePools".
- NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`
-
- // Either `launch_configurations`, `launch_templates` or `both`. For new clusters
- // use `launch_templates`, for existing cluster you'll need to migrate from
- // `launch_configurations` to `launch_templates` using `both` as interim.
- NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"`
-
- // This value defines the CIDR that will be used to assign IP addresses to the
- // services
- ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"`
-
- // This value defines the subnet IDs where the EKS cluster will be created,
- // required only if .spec.infrastructure.vpc is omitted
- SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
-
- // This value defines the VPC ID where the EKS cluster will be created, required
- // only if .spec.infrastructure.vpc is omitted
- VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
-
- // Overrides the default IAM role name prefix for the EKS workers
- WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"`
-}
-
-type SpecKubernetesAPIServer struct {
- // This value defines if the API server will be accessible only from the private
- // subnets
- PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`
-
- // This value defines the CIDRs that will be allowed to access the API server from
- // the private subnets
- PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`
-
- // This value defines if the API server will be accessible from the public subnets
- PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`
-
- // This value defines the CIDRs that will be allowed to access the API server from
- // the public subnets
- PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
-}
-
-type SpecKubernetesAwsAuth struct {
- // This optional array defines additional AWS accounts that will be added to the
- // aws-auth configmap
- AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"`
-
- // This optional array defines additional IAM roles that will be added to the
- // aws-auth configmap
- Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"`
-
- // This optional array defines additional IAM users that will be added to the
- // aws-auth configmap
- Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
-}
-
-type SpecKubernetesAwsAuthRole struct {
- // Groups corresponds to the JSON schema field "groups".
- Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"`
-
- // Rolearn corresponds to the JSON schema field "rolearn".
- Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"`
-
- // Username corresponds to the JSON schema field "username".
- Username string `json:"username" yaml:"username" mapstructure:"username"`
-}
-
-type SpecKubernetesAwsAuthUser struct {
- // Groups corresponds to the JSON schema field "groups".
- Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"`
-
- // Userarn corresponds to the JSON schema field "userarn".
- Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"`
-
- // Username corresponds to the JSON schema field "username".
- Username string `json:"username" yaml:"username" mapstructure:"username"`
-}
-
-type SpecKubernetesLogsTypesElem string
-
-const (
- SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api"
- SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit"
- SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator"
- SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager"
- SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler"
-)
-
-type SpecKubernetesNodePool struct {
- // AdditionalFirewallRules corresponds to the JSON schema field
- // "additionalFirewallRules".
- AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"`
-
- // Ami corresponds to the JSON schema field "ami".
- Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"`
-
- // This optional array defines additional target groups to attach to the instances
- // in the node pool
- AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"`
-
- // The container runtime to use for the nodes
- ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"`
-
- // Instance corresponds to the JSON schema field "instance".
- Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"`
-
- // Kubernetes labels that will be added to the nodes
- Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
-
- // The name of the node pool
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // Size corresponds to the JSON schema field "size".
- Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"`
-
- // This value defines the subnet IDs where the nodes will be created
- SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
-
- // AWS tags that will be added to the ASG and EC2 instances
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
-
- // Kubernetes taints that will be added to the nodes
- Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"`
-
- // Type corresponds to the JSON schema field "type".
- Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
-}
-
-type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct {
- // CidrBlocks corresponds to the JSON schema field "cidrBlocks".
- CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"`
-
- // Name corresponds to the JSON schema field "name".
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // Ports corresponds to the JSON schema field "ports".
- Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
-
- // Protocol corresponds to the JSON schema field "protocol".
- Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
-
- // Tags corresponds to the JSON schema field "tags".
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
-
- // Type corresponds to the JSON schema field "type".
- Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string
-
-const (
- SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress"
- SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress"
-)
-
-type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct {
- // From corresponds to the JSON schema field "from".
- From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"`
-
- // To corresponds to the JSON schema field "to".
- To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"`
-}
-
-type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct {
- // The name of the FW rule
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // Ports corresponds to the JSON schema field "ports".
- Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
-
- // The protocol of the FW rule
- Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
-
- // If true, the source will be the security group itself
- Self bool `json:"self" yaml:"self" mapstructure:"self"`
-
- // The tags of the FW rule
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
-
- // The type of the FW rule can be ingress or egress
- Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string
-
-const (
- SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress"
- SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress"
-)
-
-type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct {
- // The name of the FW rule
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // Ports corresponds to the JSON schema field "ports".
- Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
-
- // The protocol of the FW rule
- Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
-
- // The source security group ID
- SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"`
-
- // The tags of the FW rule
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
-
- // The type of the FW rule can be ingress or egress
- Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string
-
-const (
- SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress"
- SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress"
-)
-
-type SpecKubernetesNodePoolAdditionalFirewallRules struct {
- // The CIDR blocks for the FW rule. At the moment the first item of the list will
- // be used, others will be ignored.
- CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"`
-
- // Self corresponds to the JSON schema field "self".
- Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"`
-
- // SourceSecurityGroupId corresponds to the JSON schema field
- // "sourceSecurityGroupId".
- SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"`
-}
-
-type SpecKubernetesNodePoolAmi struct {
- // The AMI ID to use for the nodes
- Id string `json:"id" yaml:"id" mapstructure:"id"`
-
- // The owner of the AMI
- Owner string `json:"owner" yaml:"owner" mapstructure:"owner"`
-}
-
-type SpecKubernetesNodePoolContainerRuntime string
-
-const (
- SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd"
- SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker"
-)
-
-type SpecKubernetesNodePoolInstance struct {
- // MaxPods corresponds to the JSON schema field "maxPods".
- MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"`
-
- // If true, the nodes will be created as spot instances
- Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"`
-
- // The instance type to use for the nodes
- Type string `json:"type" yaml:"type" mapstructure:"type"`
-
- // The size of the disk in GB
- VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"`
-
- // VolumeType corresponds to the JSON schema field "volumeType".
- VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"`
-}
-
-type SpecKubernetesNodePoolInstanceVolumeType string
-
-const (
- SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2"
- SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3"
- SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1"
- SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard"
-)
-
-type SpecKubernetesNodePoolSize struct {
- // The maximum number of nodes in the node pool
- Max int `json:"max" yaml:"max" mapstructure:"max"`
-
- // The minimum number of nodes in the node pool
- Min int `json:"min" yaml:"min" mapstructure:"min"`
-}
-
-type SpecKubernetesNodePoolType string
-
-const (
- SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed"
- SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed"
-)
-
-type SpecKubernetesNodePoolsLaunchKind string
-
-const (
- SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both"
- SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations"
- SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates"
-)
-
-type SpecPlugins struct {
- // Helm corresponds to the JSON schema field "helm".
- Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"`
-
- // Kustomize corresponds to the JSON schema field "kustomize".
- Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"`
-}
-
-type SpecPluginsHelm struct {
- // Releases corresponds to the JSON schema field "releases".
- Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"`
-
- // Repositories corresponds to the JSON schema field "repositories".
- Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["create"]; !ok || v == nil {
+ return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required")
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required")
+ }
+ type Plain SpecDistributionModulesIngressDNSPublic
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressDNSPublic(plain)
+ return nil
}
-type SpecPluginsHelmReleases []struct {
- // The chart of the release
- Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
-
- // The name of the release
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // The namespace of the release
- Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"`
-
- // Set corresponds to the JSON schema field "set".
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"`
+// DNS definition, used in conjunction with `externalDNS` package to automate DNS
+// management and certificates emission.
+type SpecDistributionModulesIngressDNS struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The values of the release
- Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"`
+ // Private corresponds to the JSON schema field "private".
+ Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"`
- // The version of the release
- Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"`
+ // Public corresponds to the JSON schema field "public".
+ Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"`
}
-type SpecPluginsHelmReleasesElemSetElem struct {
- // The name of the set
- Name string `json:"name" yaml:"name" mapstructure:"name"`
+type SpecDistributionModulesIngressExternalDNS struct {
+ // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn".
+ PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"`
- // The value of the set
- Value string `json:"value" yaml:"value" mapstructure:"value"`
+ // PublicIamRoleArn corresponds to the JSON schema field "publicIamRoleArn".
+ PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
+ if v, ok := raw["privateIamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required")
}
- type Plain SpecDistributionModulesIngressNginx
+ if v, ok := raw["publicIamRoleArn"]; !ok || v == nil {
+ return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required")
+ }
+ type Plain SpecDistributionModulesIngressExternalDNS
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginx(plain)
+ *j = SpecDistributionModulesIngressExternalDNS(plain)
return nil
}
+type SpecDistributionModulesIngressForecastle struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesIngressNginxTLSProvider string
+
+var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
+ "certManager",
+ "secret",
+ "none",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
}
- *j = SpecDistributionModulesMonitoringType(v)
+ *j = SpecDistributionModulesIngressNginxTLSProvider(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
- "dns01",
- "http01",
+const (
+ SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager"
+ SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
+ SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none"
+)
+
+// Kubernetes TLS secret for the ingresses TLS certificate.
+type SpecDistributionModulesIngressNginxTLSSecret struct {
+ // The Certificate Authority certificate file's content. You can use the
+ // `"{file://}"` notation to get the content from a file.
+ Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
+
+ // The certificate file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
+ Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
+
+ // The signing key file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
+ Key string `json:"key" yaml:"key" mapstructure:"key"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["hostedZoneId"]; !ok || v == nil {
- return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required")
+ if v, ok := raw["ca"]; !ok || v == nil {
+ return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
}
- if v, ok := raw["iamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required")
+ if v, ok := raw["cert"]; !ok || v == nil {
+ return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
}
- if v, ok := raw["region"]; !ok || v == nil {
- return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required")
+ if v, ok := raw["key"]; !ok || v == nil {
+ return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
}
- type Plain SpecDistributionModulesIngressClusterIssuerRoute53
+ type Plain SpecDistributionModulesIngressNginxTLSSecret
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain)
+ *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
return nil
}
+type SpecDistributionModulesIngressNginxTLS struct {
+ // The provider of the TLS certificates for the ingresses, one of: `none`,
+ // `certManager`, or `secret`.
+ Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
+
+ // Secret corresponds to the JSON schema field "secret".
+ Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
+ }
+ type Plain SpecDistributionModulesIngressNginxTLS
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressNginxTLS(plain)
+ return nil
+}
+
+type SpecDistributionModulesIngressNginxType string
+
+var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
+ "none",
+ "single",
+ "dual",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ }
+ *j = SpecDistributionModulesIngressNginxType(v)
+ return nil
+}
+
+const (
+ SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none"
+ SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
+ SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual"
+)
+
+type SpecDistributionModulesIngressNginx struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Tls corresponds to the JSON schema field "tls".
+ Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
+
+ // The type of the Ingress nginx controller, options are:
+ // - `none`: no ingress controller will be installed and no infrastructural
+ // ingresses will be created.
+ // - `single`: a single ingress controller with ingress class `nginx` will be
+ // installed to manage all the ingress resources, infrastructural ingresses will
+ // be created.
+ // - `dual`: two independent ingress controllers will be installed, one for the
+ // `internal` ingress class intended for private ingresses and one for the
+ // `external` ingress class intended for public ingresses. KFD infrastructural
+ // ingresses wil use the `internal` ingress class when using the dual type.
+ //
+ // Default is `single`.
+ Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+ return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
}
- type Plain SpecDistributionModulesDr
+ type Plain SpecDistributionModulesIngressNginx
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDr(plain)
+ *j = SpecDistributionModulesIngressNginx(plain)
return nil
}
+type SpecDistributionModulesIngressOverridesIngresses struct {
+ // Forecastle corresponds to the JSON schema field "forecastle".
+ Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
+}
+
+// Override the common configuration with a particular configuration for the
+// Ingress module.
+type SpecDistributionModulesIngressOverrides struct {
+ // Ingresses corresponds to the JSON schema field "ingresses".
+ Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+
+ // Set to override the node selector used to place the pods of the Ingress module.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+ // Set to override the tolerations that will be added to the pods of the Ingress
+ // module.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
+type SpecDistributionModulesIngress struct {
+ // The base domain used for all the KFD infrastructural ingresses. If in the nginx
+ // `dual` configuration type, this value should be the same as the
+ // `.spec.distribution.modules.ingress.dns.private.name` zone.
+ BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`
+
+ // Configuration for the cert-manager package. Required even if
+ // `ingress.nginx.type` is `none`, cert-manager is used for managing other
+ // certificates in the cluster besides the TLS termination certificates for the
+ // ingresses.
+ CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"`
+
+ // Dns corresponds to the JSON schema field "dns".
+ Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"`
+
+ // ExternalDns corresponds to the JSON schema field "externalDns".
+ ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"`
+
+ // Forecastle corresponds to the JSON schema field "forecastle".
+ Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
+
+ // Configurations for the Ingress nginx controller package.
+ Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["eks"]; !ok || v == nil {
- return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required")
+ if v, ok := raw["baseDomain"]; !ok || v == nil {
+ return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
}
- type Plain SpecDistributionModulesDrVelero
+ if v, ok := raw["certManager"]; !ok || v == nil {
+ return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required")
+ }
+ if v, ok := raw["externalDns"]; !ok || v == nil {
+ return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required")
+ }
+ if v, ok := raw["nginx"]; !ok || v == nil {
+ return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+ }
+ type Plain SpecDistributionModulesIngress
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDrVelero(plain)
+ *j = SpecDistributionModulesIngress(plain)
return nil
}
+// DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
+type SpecDistributionModulesLoggingCerebro struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
+type SpecDistributionModulesLoggingCustomOutputs struct {
+ // This value defines where the output from the `audit` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
+ Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`
+
+ // This value defines where the output from the `errors` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
+ Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`
+
+ // This value defines where the output from the `events` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
+ Events string `json:"events" yaml:"events" mapstructure:"events"`
+
+ // This value defines where the output from the `infra` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
+ Infra string `json:"infra" yaml:"infra" mapstructure:"infra"`
+
+ // This value defines where the output from the `ingressNginx` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
+ IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"`
+
+ // This value defines where the output from the `kubernetes` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
+ Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"`
+
+ // This value defines where the output from the `systemdCommon` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
+ SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"`
+
+ // This value defines where the output from the `systemdEtcd` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
+ SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["bucketName"]; !ok || v == nil {
- return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required")
+ if v, ok := raw["audit"]; !ok || v == nil {
+ return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if v, ok := raw["iamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required")
+ if v, ok := raw["errors"]; !ok || v == nil {
+ return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if v, ok := raw["region"]; !ok || v == nil {
- return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required")
+ if v, ok := raw["events"]; !ok || v == nil {
+ return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
}
- type Plain SpecDistributionModulesDrVeleroEks
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if v, ok := raw["infra"]; !ok || v == nil {
+ return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
}
- *j = SpecDistributionModulesDrVeleroEks(plain)
- return nil
-}
-
-const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2"
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
+ if v, ok := raw["ingressNginx"]; !ok || v == nil {
+ return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
+ if v, ok := raw["kubernetes"]; !ok || v == nil {
+ return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
}
- type Plain SpecDistributionModulesMonitoring
+ if v, ok := raw["systemdCommon"]; !ok || v == nil {
+ return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdEtcd"]; !ok || v == nil {
+ return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ type Plain SpecDistributionModulesLoggingCustomOutputs
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesMonitoring(plain)
+ *j = SpecDistributionModulesLoggingCustomOutputs(plain)
return nil
}
-const (
- TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1"
- TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1"
-)
+type SpecDistributionModulesLoggingLokiBackend string
-var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
- "none",
+var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
}
- *j = SpecDistributionModulesNetworkingType(v)
+ *j = SpecDistributionModulesLoggingLokiBackend(v)
return nil
}
const (
- TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1"
- TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2"
- TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1"
+ SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio"
+ SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint"
)
-var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
- "deny",
- "dryrun",
- "warn",
+// Configuration for Loki's external storage backend.
+type SpecDistributionModulesLoggingLokiExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // External S3-compatible endpoint for Loki's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
- }
- *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
- return nil
+type TypesKubeResourcesLimits struct {
+ // The CPU limit for the Pod. Example: `1000m`.
+ Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+ // The memory limit for the Pod. Example: `1G`.
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
}
-const (
- TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1"
- TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1"
- TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1"
- TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3"
-)
+type TypesKubeResourcesRequests struct {
+ // The CPU request for the Pod, in cores. Example: `500m`.
+ Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+ // The memory request for the Pod. Example: `500M`.
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResources struct {
+ // Limits corresponds to the JSON schema field "limits".
+ Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+
+ // Requests corresponds to the JSON schema field "requests".
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
+}
+
+// Configuration for the Loki package.
+type SpecDistributionModulesLoggingLoki struct {
+ // The storage backend type for Loki. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Loki's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+ // time series database from BoltDB to TSDB and the schema from v11 to v13 that it
+ // uses to store the logs.
+ //
+ // The value of this field will determine the date when Loki will start writing
+ // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+ // and schema will be kept until they expire for reading purposes.
+ //
+ // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+ // `2024-11-18`.
+ TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
+}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["enforcementAction"]; !ok || v == nil {
- return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
- }
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
+ if v, ok := raw["tsdbStartDate"]; !ok || v == nil {
+ return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required")
}
- type Plain SpecDistributionModulesPolicyGatekeeper
+ type Plain SpecDistributionModulesLoggingLoki
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicyGatekeeper(plain)
+ *j = SpecDistributionModulesLoggingLoki(plain)
return nil
}
-const TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2"
+type SpecDistributionModulesLoggingMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
- "Audit",
- "Enforce",
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Logging's MinIO deployment.
+type SpecDistributionModulesLoggingMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesLoggingOpensearchType string
+
+var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{
+ "single",
+ "triple",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
}
- *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+ *j = SpecDistributionModulesLoggingOpensearchType(v)
return nil
}
const (
- TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1"
- TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2"
- TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1"
+ SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single"
+ SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple"
)
+type SpecDistributionModulesLoggingOpensearch struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // The storage size for the OpenSearch volumes. Follows Kubernetes resources
+ // storage requests. Default is `150Gi`.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+
+ // The type of OpenSearch deployment. One of: `single` for a single replica or
+ // `triple` for an HA 3-replicas deployment.
+ Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
- }
- if v, ok := raw["validationFailureAction"]; !ok || v == nil {
- return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
}
- type Plain SpecDistributionModulesPolicyKyverno
+ type Plain SpecDistributionModulesLoggingOpensearch
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicyKyverno(plain)
+ *j = SpecDistributionModulesLoggingOpensearch(plain)
return nil
}
-const TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1"
+// Configuration for the Logging Operator.
+type SpecDistributionModulesLoggingOperator struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesLoggingType string
-var enumValues_SpecDistributionModulesPolicyType = []interface{}{
+var enumValues_SpecDistributionModulesLoggingType = []interface{}{
"none",
- "gatekeeper",
- "kyverno",
+ "opensearch",
+ "loki",
+ "customOutputs",
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
- }
- *j = SpecDistributionModulesPolicyType(v)
- return nil
+const (
+ SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
+ SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none"
+ SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch"
+ SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki"
+ SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs"
+)
+
+// Configuration for the Logging module.
+type SpecDistributionModulesLogging struct {
+ // Cerebro corresponds to the JSON schema field "cerebro".
+ Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
+
+ // CustomOutputs corresponds to the JSON schema field "customOutputs".
+ CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"`
+
+ // Loki corresponds to the JSON schema field "loki".
+ Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"`
+
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Opensearch corresponds to the JSON schema field "opensearch".
+ Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"`
+
+ // Operator corresponds to the JSON schema field "operator".
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Selects the logging stack. Options are:
+ // - `none`: will disable the centralized logging.
+ // - `opensearch`: will deploy and configure the Logging Operator and an
+ // OpenSearch cluster (can be single or triple for HA) where the logs will be
+ // stored.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+ // storage.
+ // - `customOutputs`: the Logging Operator will be deployed and installed but
+ // without in-cluster storage, you will have to create the needed Outputs and
+ // ClusterOutputs to ship the logs to your desired storage.
+ //
+ // Default is `opensearch`.
+ Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
}
-const (
- TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2"
- TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1"
- TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1"
- TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4"
-)
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
+ return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
}
- type Plain SpecDistributionModulesPolicy
+ type Plain SpecDistributionModulesLogging
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicy(plain)
+ *j = SpecDistributionModulesLogging(plain)
return nil
}
-const (
- TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3"
- TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2"
- TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1"
-)
+type SpecDistributionModulesMonitoringAlertManager struct {
+ // The webhook URL to send dead man's switch monitoring, for example to use with
+ // healthchecks.io.
+ DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
-var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
+ // Set to false to avoid installing the Prometheus rules (alerts) included with
+ // the distribution.
+ InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+ // The Slack webhook URL where to send the infrastructural and workload alerts to.
+ SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+ // Setting this to true will deploy an additional `grafana-basic-auth` ingress
+ // protected with Grafana's basic auth instead of SSO. It's intended use is as a
+ // temporary ingress for when there are problems with the SSO login flow.
+ //
+ // Notice that by default anonymous access is enabled.
+ BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+ // role. Example:
+ //
+ // ```yaml
+ // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+ // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+ // 'Viewer'"
+ // ```
+ //
+ // More details in [Grafana's
+ // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+ UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMimirBackend string
+
+var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
"minio",
"externalEndpoint",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
}
- *j = SpecDistributionModulesTracingTempoBackend(v)
+ *j = SpecDistributionModulesMonitoringMimirBackend(v)
return nil
}
const (
- TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2"
- TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1"
- TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3"
- TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2"
- TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1"
+ SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
+ SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint"
)
-var enumValues_SpecDistributionModulesTracingType = []interface{}{
- "none",
- "tempo",
+// Configuration for Mimir's external storage backend.
+type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // The external S3-compatible endpoint for Mimir's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
- }
- *j = SpecDistributionModulesTracingType(v)
- return nil
+// Configuration for the Mimir package.
+type SpecDistributionModulesMonitoringMimir struct {
+ // The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Mimir's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the metrics stored in Mimir. Default is `30d`. Value
+ // must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y =
+ // 365 days.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
}
-const (
- TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1"
- TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1"
-)
+type SpecDistributionModulesMonitoringMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_TypesAwsRegion {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v)
- }
- *j = TypesAwsRegion(v)
- return nil
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
- }
- type Plain SpecDistributionModulesTracing
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesTracing(plain)
- return nil
+// Configuration for Monitoring's MinIO deployment.
+type SpecDistributionModulesMonitoringMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
-var enumValues_TypesAwsRegion = []interface{}{
- "af-south-1",
- "ap-east-1",
- "ap-northeast-1",
- "ap-northeast-2",
- "ap-northeast-3",
- "ap-south-1",
- "ap-south-2",
- "ap-southeast-1",
- "ap-southeast-2",
- "ap-southeast-3",
- "ap-southeast-4",
- "ca-central-1",
- "eu-central-1",
- "eu-central-2",
- "eu-north-1",
- "eu-south-1",
- "eu-south-2",
- "eu-west-1",
- "eu-west-2",
- "eu-west-3",
- "me-central-1",
- "me-south-1",
- "sa-east-1",
- "us-east-1",
- "us-east-2",
- "us-gov-east-1",
- "us-gov-west-1",
- "us-west-1",
- "us-west-2",
+type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheus struct {
+ // Set this option to ship the collected metrics to a remote Prometheus receiver.
+ //
+ // `remoteWrite` is an array of objects that allows configuring the
+ // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+ // Prometheus. The objects in the array follow [the same schema as in the
+ // prometheus
+ // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // The retention size for the `k8s` Prometheus instance.
+ RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
+
+ // The retention time for the `k8s` Prometheus instance.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+
+ // The storage size for the `k8s` Prometheus instance.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["dr"]; !ok || v == nil {
- return fmt.Errorf("field dr in SpecDistributionModules: required")
- }
- if v, ok := raw["ingress"]; !ok || v == nil {
- return fmt.Errorf("field ingress in SpecDistributionModules: required")
- }
- if v, ok := raw["logging"]; !ok || v == nil {
- return fmt.Errorf("field logging in SpecDistributionModules: required")
- }
- if v, ok := raw["policy"]; !ok || v == nil {
- return fmt.Errorf("field policy in SpecDistributionModules: required")
- }
- type Plain SpecDistributionModules
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModules(plain)
- return nil
+type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheusAgent struct {
+ // Set this option to ship the collected metrics to a remote Prometheus receiver.
+ //
+ // `remoteWrite` is an array of objects that allows configuring the
+ // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+ // Prometheus. The objects in the array follow [the same schema as in the
+ // prometheus
+ // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
}
-type TypesAwsRegion string
+type SpecDistributionModulesMonitoringType string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["modules"]; !ok || v == nil {
- return fmt.Errorf("field modules in SpecDistribution: required")
- }
- type Plain SpecDistribution
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistribution(plain)
- return nil
+var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
+ "none",
+ "prometheus",
+ "prometheusAgent",
+ "mimir",
}
-type TypesCidr string
-
-type TypesAwsS3BucketName string
+const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1"
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error {
+func (j *Metadata) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["private"]; !ok || v == nil {
- return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
- }
- if v, ok := raw["public"]; !ok || v == nil {
- return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in Metadata: required")
}
- type Plain SpecInfrastructureVpcNetworkSubnetsCidrs
+ type Plain Metadata
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain)
+ if len(plain.Name) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "name", 1)
+ }
+ if len(plain.Name) > 56 {
+ return fmt.Errorf("field %s length: must be <= %d", "name", 56)
+ }
+ *j = Metadata(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesDrType {
+ for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
}
- *j = SpecDistributionModulesDrType(v)
+ *j = SpecDistributionModulesNetworkingType(v)
return nil
}
+const (
+ SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
+ SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir"
+)
+
+type SpecDistributionModulesMonitoringX509Exporter struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// Configuration for the Monitoring module.
+type SpecDistributionModulesMonitoring struct {
+ // Alertmanager corresponds to the JSON schema field "alertmanager".
+ Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"`
+
+ // BlackboxExporter corresponds to the JSON schema field "blackboxExporter".
+ BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"`
+
+ // Grafana corresponds to the JSON schema field "grafana".
+ Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"`
+
+ // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics".
+ KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"`
+
+ // Mimir corresponds to the JSON schema field "mimir".
+ Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"`
+
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Prometheus corresponds to the JSON schema field "prometheus".
+ Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
+
+ // PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
+ PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
+
+ // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+ // `mimir`.
+ //
+ // - `none`: will disable the whole monitoring stack.
+ // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+ // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+ // the components of the cluster, Grafana and a series of dashboards to view the
+ // collected metrics, and more.
+ // - `prometheusAgent`: will install Prometheus operator, an instance of
+ // Prometheus in Agent mode (no alerting, no queries, no storage), and all the
+ // exporters needed to get metrics for the status of the cluster and the
+ // workloads. Useful when having a centralized (remote) Prometheus where to ship
+ // the metrics and not storing them locally in the cluster.
+ // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+ // that allows for longer retention of metrics and the usage of Object Storage.
+ //
+ // Default is `prometheus`.
+ Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+ // X509Exporter corresponds to the JSON schema field "x509Exporter".
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["cidr"]; !ok || v == nil {
- return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required")
- }
- if v, ok := raw["subnetsCidrs"]; !ok || v == nil {
- return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
}
- type Plain SpecInfrastructureVpcNetwork
+ type Plain SpecDistributionModulesMonitoring
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecInfrastructureVpcNetwork(plain)
+ *j = SpecDistributionModulesMonitoring(plain)
return nil
}
-var enumValues_SpecDistributionModulesDrType = []interface{}{
+type SpecDistributionModulesNetworkingTigeraOperator struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingType string
+
+var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
"none",
- "eks",
+}
+
+const (
+ SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus"
+ SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none"
+)
+
+// Configuration for the Networking module.
+type SpecDistributionModulesNetworking struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // TigeraOperator corresponds to the JSON schema field "tigeraOperator".
+ TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
+
+ // Type corresponds to the JSON schema field "type".
+ Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
+}
+
+type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
+
+var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
+ "deny",
+ "dryrun",
+ "warn",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["network"]; !ok || v == nil {
- return fmt.Errorf("field network in SpecInfrastructureVpc: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecInfrastructureVpc
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
}
- *j = SpecInfrastructureVpc(plain)
+ *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
return nil
}
-type TypesAwsS3BucketNamePrefix string
+const (
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
+)
-type TypesTcpPort int
+// Configuration for the Gatekeeper package.
+type SpecDistributionModulesPolicyGatekeeper struct {
+ // This parameter adds namespaces to Gatekeeper's exemption list, so it will not
+ // enforce the constraints on them.
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+ // The default enforcement action to use for the included constraints. `deny` will
+ // block the admission when violations to the policies are found, `warn` will show
+ // a message to the user but will admit the violating requests and `dryrun` won't
+ // give any feedback to the user but it will log the violations.
+ EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
+
+ // Set to `false` to avoid installing the default Gatekeeper policies (constraints
+ // templates and constraints) included with the distribution.
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["clusterAutoscaler"]; !ok || v == nil {
- return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required")
- }
- if v, ok := raw["ebsCsiDriver"]; !ok || v == nil {
- return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required")
- }
- if v, ok := raw["loadBalancerController"]; !ok || v == nil {
- return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required")
+ if v, ok := raw["enforcementAction"]; !ok || v == nil {
+ return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
}
- if v, ok := raw["overrides"]; !ok || v == nil {
- return fmt.Errorf("field overrides in SpecDistributionModulesAws: required")
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
}
- type Plain SpecDistributionModulesAws
+ type Plain SpecDistributionModulesPolicyGatekeeper
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAws(plain)
+ *j = SpecDistributionModulesPolicyGatekeeper(plain)
return nil
}
+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
+
+var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
+ "Audit",
+ "Enforce",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["allowedFromCidrs"]; !ok || v == nil {
- return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required")
- }
- if v, ok := raw["githubUsersName"]; !ok || v == nil {
- return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required")
- }
- type Plain SpecInfrastructureVpnSsh
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1)
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
}
- *j = SpecInfrastructureVpnSsh(plain)
+ *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
return nil
}
-type TypesAwsVpcId string
+const (
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
+)
+
+// Configuration for the Kyverno package.
+type SpecDistributionModulesPolicyKyverno struct {
+ // This parameter adds namespaces to Kyverno's exemption list, so it will not
+ // enforce the policies on them.
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-type TypesFuryModuleOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+ // Set to `false` to avoid installing the default Kyverno policies included with
+ // the distribution.
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
- // The node selector to use to place the pods for the dr module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The tolerations that will be added to the pods for the monitoring module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+ // The validation failure action to use for the policies, `Enforce` will block
+ // when a request does not comply with the policies and `Audit` will not block but
+ // log when a request does not comply with the policies.
+ ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["ssh"]; !ok || v == nil {
- return fmt.Errorf("field ssh in SpecInfrastructureVpn: required")
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
}
- if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil {
- return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required")
+ if v, ok := raw["validationFailureAction"]; !ok || v == nil {
+ return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
}
- type Plain SpecInfrastructureVpn
+ type Plain SpecDistributionModulesPolicyKyverno
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecInfrastructureVpn(plain)
+ *j = SpecDistributionModulesPolicyKyverno(plain)
return nil
}
-type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
-
-type TypesFuryModuleOverridesIngress struct {
- // If true, the ingress will not have authentication
- DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
-
- // The host of the ingress
- Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+type SpecDistributionModulesPolicyType string
- // The ingress class of the ingress
- IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
+var enumValues_SpecDistributionModulesPolicyType = []interface{}{
+ "none",
+ "gatekeeper",
+ "kyverno",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["privateAccess"]; !ok || v == nil {
- return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required")
- }
- if v, ok := raw["publicAccess"]; !ok || v == nil {
- return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecKubernetesAPIServer
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
}
- *j = SpecKubernetesAPIServer(plain)
+ *j = SpecDistributionModulesPolicyType(v)
return nil
}
+const (
+ SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
+ SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
+ SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno"
+)
+
+// Configuration for the Policy module.
+type SpecDistributionModulesPolicy struct {
+ // Gatekeeper corresponds to the JSON schema field "gatekeeper".
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
+
+ // Kyverno corresponds to the JSON schema field "kyverno".
+ Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The type of policy enforcement to use, either `none`, `gatekeeper` or
+ // `kyverno`.
+ //
+ // Default is `none`.
+ Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["iamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
}
- type Plain SpecDistributionModulesAwsLoadBalancerController
+ type Plain SpecDistributionModulesPolicy
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAwsLoadBalancerController(plain)
+ *j = SpecDistributionModulesPolicy(plain)
return nil
}
+type SpecDistributionModulesTracingMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Tracing's MinIO deployment.
+type SpecDistributionModulesTracingMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesTracingTempoBackend string
+
+var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["groups"]; !ok || v == nil {
- return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required")
- }
- if v, ok := raw["rolearn"]; !ok || v == nil {
- return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecKubernetesAwsAuthRole
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
}
- *j = SpecKubernetesAwsAuthRole(plain)
+ *j = SpecDistributionModulesTracingTempoBackend(v)
return nil
}
+const (
+ SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
+ SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
+)
+
+// Configuration for Tempo's external storage backend.
+type SpecDistributionModulesTracingTempoExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // The external S3-compatible endpoint for Tempo's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Tempo package.
+type SpecDistributionModulesTracingTempo struct {
+ // The storage backend type for Tempo. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Tempo's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the traces stored in Tempo.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesTracingType string
+
+var enumValues_SpecDistributionModulesTracingType = []interface{}{
+ "none",
+ "tempo",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["iamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesTracingType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesAwsEbsCsiDriver
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
}
- *j = SpecDistributionModulesAwsEbsCsiDriver(plain)
+ *j = SpecDistributionModulesTracingType(v)
return nil
}
+const (
+ SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none"
+ SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
+)
+
+// Configuration for the Tracing module.
+type SpecDistributionModulesTracing struct {
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Tempo corresponds to the JSON schema field "tempo".
+ Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
+
+ // The type of tracing to use, either `none` or `tempo`. `none` will disable the
+ // Tracing module and `tempo` will install a Grafana Tempo deployment.
+ //
+ // Default is `tempo`.
+ Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["groups"]; !ok || v == nil {
- return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required")
- }
- if v, ok := raw["userarn"]; !ok || v == nil {
- return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
}
- type Plain SpecKubernetesAwsAuthUser
+ type Plain SpecDistributionModulesTracing
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesAwsAuthUser(plain)
+ *j = SpecDistributionModulesTracing(plain)
return nil
}
+type SpecDistributionModules struct {
+ // Auth corresponds to the JSON schema field "auth".
+ Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"`
+
+ // Aws corresponds to the JSON schema field "aws".
+ Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"`
+
+ // Dr corresponds to the JSON schema field "dr".
+ Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"`
+
+ // Ingress corresponds to the JSON schema field "ingress".
+ Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"`
+
+ // Logging corresponds to the JSON schema field "logging".
+ Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"`
+
+ // Monitoring corresponds to the JSON schema field "monitoring".
+ Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"`
+
+ // Networking corresponds to the JSON schema field "networking".
+ Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"`
+
+ // Policy corresponds to the JSON schema field "policy".
+ Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"`
+
+ // Tracing corresponds to the JSON schema field "tracing".
+ Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["iamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required")
+ if v, ok := raw["dr"]; !ok || v == nil {
+ return fmt.Errorf("field dr in SpecDistributionModules: required")
}
- type Plain SpecDistributionModulesAwsClusterAutoscaler
+ if v, ok := raw["ingress"]; !ok || v == nil {
+ return fmt.Errorf("field ingress in SpecDistributionModules: required")
+ }
+ if v, ok := raw["logging"]; !ok || v == nil {
+ return fmt.Errorf("field logging in SpecDistributionModules: required")
+ }
+ if v, ok := raw["policy"]; !ok || v == nil {
+ return fmt.Errorf("field policy in SpecDistributionModules: required")
+ }
+ type Plain SpecDistributionModules
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAwsClusterAutoscaler(plain)
+ *j = SpecDistributionModules(plain)
return nil
}
-type TypesAwsIamRoleNamePrefix string
-
-type TypesFuryModuleComponentOverridesWithIAMRoleName struct {
- // IamRoleName corresponds to the JSON schema field "iamRoleName".
- IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"`
-
- // The node selector to use to place the pods for the load balancer controller
- // module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+type SpecDistribution struct {
+ // Common corresponds to the JSON schema field "common".
+ Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"`
- // The tolerations that will be added to the pods for the cluster autoscaler
- // module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
-}
+ // CustomPatches corresponds to the JSON schema field "customPatches".
+ CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"`
-var enumValues_SpecKubernetesLogsTypesElem = []interface{}{
- "api",
- "audit",
- "authenticator",
- "controllerManager",
- "scheduler",
+ // Modules corresponds to the JSON schema field "modules".
+ Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecKubernetesLogsTypesElem {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["modules"]; !ok || v == nil {
+ return fmt.Errorf("field modules in SpecDistribution: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v)
+ type Plain SpecDistribution
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
}
- *j = SpecKubernetesLogsTypesElem(v)
+ *j = SpecDistribution(plain)
return nil
}
-type TypesAwsIamRoleName string
+type TypesCidr string
-type TypesAwsArn string
+// Network CIDRS configuration for private and public subnets.
+type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
+ // The network CIDRs for the private subnets, where the nodes, the pods, and the
+ // private load balancers will be created
+ Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`
+
+ // The network CIDRs for the public subnets, where the public load balancers and
+ // the VPN servers will be created
+ Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
+}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+ if v, ok := raw["private"]; !ok || v == nil {
+ return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
}
- type Plain SpecDistributionModulesAuth
+ if v, ok := raw["public"]; !ok || v == nil {
+ return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
+ }
+ type Plain SpecInfrastructureVpcNetworkSubnetsCidrs
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuth(plain)
+ *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain)
return nil
}
+type SpecInfrastructureVpcNetwork struct {
+ // The network CIDR for the VPC that will be created
+ Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"`
+
+ // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs".
+ SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
+ if v, ok := raw["cidr"]; !ok || v == nil {
+ return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required")
}
- type Plain SpecDistributionModulesAuthProvider
+ if v, ok := raw["subnetsCidrs"]; !ok || v == nil {
+ return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required")
+ }
+ type Plain SpecInfrastructureVpcNetwork
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProvider(plain)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
- }
- *j = SpecDistributionModulesAuthProviderType(v)
+ *j = SpecInfrastructureVpcNetwork(plain)
return nil
}
-var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
- "none",
- "basicAuth",
- "sso",
+// Configuration for the VPC that will be created to host the EKS cluster and its
+// related resources. If you already have a VPC that you want to use, leave this
+// section empty and use `.spec.kubernetes.vpcId` instead.
+type SpecInfrastructureVpc struct {
+ // Network corresponds to the JSON schema field "network".
+ Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["from"]; !ok || v == nil {
- return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
- }
- if v, ok := raw["to"]; !ok || v == nil {
- return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
+ if v, ok := raw["network"]; !ok || v == nil {
+ return fmt.Errorf("field network in SpecInfrastructureVpc: required")
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts
+ type Plain SpecInfrastructureVpc
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain)
+ *j = SpecInfrastructureVpc(plain)
return nil
}
-type TypesAwsIpProtocol string
+type TypesAwsS3BucketNamePrefix string
-type TypesAwsTags map[string]string
+type TypesTcpPort int
+
+type SpecInfrastructureVpnSsh struct {
+ // The network CIDRs enabled in the security group to access the VPN servers
+ // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
+ AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`
+
+ // List of GitHub usernames from whom to get their SSH public keys and add as
+ // authorized keys of the `operatorName` user.
+ GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`
+
+ // **NOT IN USE**, use `githubUsersName` instead. This value defines the public
+ // keys that will be added to the bastion's operating system.
+ PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
+}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["password"]; !ok || v == nil {
- return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
+ if v, ok := raw["allowedFromCidrs"]; !ok || v == nil {
+ return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required")
}
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ if v, ok := raw["githubUsersName"]; !ok || v == nil {
+ return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required")
}
- type Plain SpecDistributionModulesAuthProviderBasicAuth
+ type Plain SpecInfrastructureVpnSsh
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+ if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1)
+ }
+ *j = SpecInfrastructureVpnSsh(plain)
return nil
}
-var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{
- "ingress",
- "egress",
-}
+type TypesAwsVpcId string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v)
- }
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v)
- return nil
+// Configuration for the VPN server instances.
+type SpecInfrastructureVpn struct {
+ // This value defines the prefix for the bucket name where the VPN servers will
+ // store their state (VPN certificates, users).
+ BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`
+
+ // The `dhParamsBits` size used for the creation of the .pem file that will be
+ // used in the dh openvpn server.conf file.
+ DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`
+
+ // The size of the disk in GB for each VPN server. Example: entering `50` will
+ // create disks of 50 GB.
+ DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`
+
+ // Overrides IAM user name for the VPN. Default is to use the cluster name.
+ IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
+
+ // The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+ // nomenclature. Example: `t3.micro`.
+ InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
+
+ // The number of VPN server instances to create, `0` to skip the creation.
+ Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
+
+ // The username of the account to create in the bastion's operating system.
+ OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
+
+ // The port where each OpenVPN server will listen for connections.
+ Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
+
+ // Ssh corresponds to the JSON schema field "ssh".
+ Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
+
+ // The ID of the VPC where the VPN server instances will be created, required only
+ // if `.spec.infrastructure.vpc` is omitted.
+ VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
+
+ // The network CIDR that will be used to assign IP addresses to the VPN clients
+ // when connected.
+ VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["host"]; !ok || v == nil {
- return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
+ if v, ok := raw["ssh"]; !ok || v == nil {
+ return fmt.Errorf("field ssh in SpecInfrastructureVpn: required")
}
- if v, ok := raw["ingressClass"]; !ok || v == nil {
- return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+ if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil {
+ return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required")
}
- type Plain SpecDistributionModulesAuthOverridesIngress
+ type Plain SpecInfrastructureVpn
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthOverridesIngress(plain)
+ *j = SpecInfrastructureVpn(plain)
return nil
}
+type SpecInfrastructure struct {
+ // Vpc corresponds to the JSON schema field "vpc".
+ Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`
+
+ // Vpn corresponds to the JSON schema field "vpn".
+ Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
+}
+
+type SpecKubernetesAPIServer struct {
+ // This value defines if the Kubernetes API server will be accessible from the
+ // private subnets. Default is `true`.
+ PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`
+
+ // The network CIDRs from the private subnets that will be allowed to access
+ // the Kubernetes API server.
+ PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`
+
+ // This value defines if the Kubernetes API server will be accessible from the
+ // public subnets. Default is `false`.
+ PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`
+
+ // The network CIDRs from the public subnets that will be allowed to access
+ // the Kubernetes API server.
+ PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["connectors"]; !ok || v == nil {
- return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
+ if v, ok := raw["privateAccess"]; !ok || v == nil {
+ return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required")
}
- type Plain SpecDistributionModulesAuthDex
+ if v, ok := raw["publicAccess"]; !ok || v == nil {
+ return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required")
+ }
+ type Plain SpecKubernetesAPIServer
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthDex(plain)
+ *j = SpecKubernetesAPIServer(plain)
return nil
}
-type TypesFuryModuleComponentOverrides struct {
- // The node selector to use to place the pods for the minio module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+type SpecKubernetesAwsAuthRole struct {
+ // Groups corresponds to the JSON schema field "groups".
+ Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"`
- // The tolerations that will be added to the pods for the cert-manager module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+ // Rolearn corresponds to the JSON schema field "rolearn".
+ Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"`
+
+ // Username corresponds to the JSON schema field "username".
+ Username string `json:"username" yaml:"username" mapstructure:"username"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["cidrBlocks"]; !ok || v == nil {
- return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
- }
- if v, ok := raw["ports"]; !ok || v == nil {
- return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ if v, ok := raw["groups"]; !ok || v == nil {
+ return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required")
}
- if v, ok := raw["protocol"]; !ok || v == nil {
- return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ if v, ok := raw["rolearn"]; !ok || v == nil {
+ return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required")
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required")
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock
+ type Plain SpecKubernetesAwsAuthRole
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
- }
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain)
+ *j = SpecKubernetesAwsAuthRole(plain)
return nil
}
+type SpecKubernetesAwsAuthUser struct {
+ // Groups corresponds to the JSON schema field "groups".
+ Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"`
+
+ // Userarn corresponds to the JSON schema field "userarn".
+ Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"`
+
+ // Username corresponds to the JSON schema field "username".
+ Username string `json:"username" yaml:"username" mapstructure:"username"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
+ if v, ok := raw["groups"]; !ok || v == nil {
+ return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required")
}
- type Plain SpecDistributionCustomPatchesSecretGeneratorResource
+ if v, ok := raw["userarn"]; !ok || v == nil {
+ return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required")
+ }
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required")
+ }
+ type Plain SpecKubernetesAwsAuthUser
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
+ *j = SpecKubernetesAwsAuthUser(plain)
return nil
}
-var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{
- "ingress",
- "egress",
-}
+// Optional additional security configuration for EKS IAM via the `aws-auth`
+// configmap.
+//
+// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
+type SpecKubernetesAwsAuth struct {
+ // This optional array defines additional AWS accounts that will be added to the
+ // `aws-auth` configmap.
+ AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"`
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v)
- }
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v)
- return nil
+ // This optional array defines additional IAM roles that will be added to the
+ // `aws-auth` configmap.
+ Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"`
+
+ // This optional array defines additional IAM users that will be added to the
+ // `aws-auth` configmap.
+ Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
- }
- *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
- return nil
+type TypesAwsIamRoleNamePrefix string
+
+type SpecKubernetesLogRetentionDays int
+
+var enumValues_SpecKubernetesLogRetentionDays = []interface{}{
+ 0,
+ 1,
+ 3,
+ 5,
+ 7,
+ 14,
+ 30,
+ 60,
+ 90,
+ 120,
+ 150,
+ 180,
+ 365,
+ 400,
+ 545,
+ 731,
+ 1096,
+ 1827,
+ 2192,
+ 2557,
+ 2922,
+ 3288,
+ 3653,
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
- var v string
+func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error {
+ var v int
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
+ for _, expected := range enumValues_SpecKubernetesLogRetentionDays {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v)
}
- *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
+ *j = SpecKubernetesLogRetentionDays(v)
return nil
}
-const TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule"
-
-var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
- "none",
- "prometheus",
- "prometheusAgent",
- "mimir",
-}
-
-type SpecToolsConfigurationTerraformStateS3 struct {
- // This value defines which bucket will be used to store all the states
- BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"`
-
- // This value defines which folder will be used to store all the states inside the
- // bucket
- KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"`
-
- // This value defines in which region the bucket is located
- Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
-
- // This value defines if the region of the bucket should be validated or not by
- // Terraform, useful when using a bucket in a recently added region
- SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"`
-}
+type SpecKubernetesLogsTypesElem string
-var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{
- "ingress",
- "egress",
+var enumValues_SpecKubernetesLogsTypesElem = []interface{}{
+ "api",
+ "audit",
+ "authenticator",
+ "controllerManager",
+ "scheduler",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType {
+ for _, expected := range enumValues_SpecKubernetesLogsTypesElem {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v)
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v)
+ *j = SpecKubernetesLogsTypesElem(v)
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["clusterIssuer"]; !ok || v == nil {
- return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
- }
- type Plain SpecDistributionModulesIngressCertManager
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressCertManager(plain)
- return nil
-}
+const (
+ SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api"
+ SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit"
+ SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator"
+ SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager"
+ SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler"
+)
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["create"]; !ok || v == nil {
- return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required")
- }
- if v, ok := raw["vpcId"]; !ok || v == nil {
- return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required")
- }
- type Plain SpecDistributionModulesIngressDNSPrivate
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressDNSPrivate(plain)
- return nil
-}
+type SpecKubernetesNodePoolGlobalAmiType string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["create"]; !ok || v == nil {
- return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required")
- }
- type Plain SpecDistributionModulesIngressDNSPublic
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressDNSPublic(plain)
- return nil
+var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{
+ "alinux2",
+ "alinux2023",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["ports"]; !ok || v == nil {
- return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["protocol"]; !ok || v == nil {
- return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil {
- return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v)
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain)
+ *j = SpecKubernetesNodePoolGlobalAmiType(v)
return nil
}
+const (
+ SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2"
+ SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023"
+)
+
+// Port range for the Firewall Rule.
+type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct {
+ // From corresponds to the JSON schema field "from".
+ From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"`
+
+ // To corresponds to the JSON schema field "to".
+ To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["privateIamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required")
+ if v, ok := raw["from"]; !ok || v == nil {
+ return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
}
- if v, ok := raw["publicIamRoleArn"]; !ok || v == nil {
- return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required")
+ if v, ok := raw["to"]; !ok || v == nil {
+ return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
}
- type Plain SpecDistributionModulesIngressExternalDNS
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressExternalDNS(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain)
return nil
}
+type TypesAwsIpProtocol string
+
+type TypesAwsTags map[string]string
+
+type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string
+
+var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{
+ "ingress",
+ "egress",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- type Plain SpecKubernetesNodePoolAdditionalFirewallRules
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
- }
- if plain.Self != nil && len(plain.Self) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "self", 1)
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1)
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v)
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
- "certManager",
- "secret",
- "none",
+const (
+ SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress"
+ SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress"
+)
+
+type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct {
+ // CidrBlocks corresponds to the JSON schema field "cidrBlocks".
+ CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"`
+
+ // Name corresponds to the JSON schema field "name".
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // Ports corresponds to the JSON schema field "ports".
+ Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
+
+ // Protocol corresponds to the JSON schema field "protocol".
+ Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
+
+ // Additional AWS tags for the Firewall rule.
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
+
+ // The type of the Firewall rule; it can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
+ Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["id"]; !ok || v == nil {
- return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required")
+ if v, ok := raw["cidrBlocks"]; !ok || v == nil {
+ return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ }
+ if v, ok := raw["ports"]; !ok || v == nil {
+ return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ }
+ if v, ok := raw["protocol"]; !ok || v == nil {
+ return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
}
- if v, ok := raw["owner"]; !ok || v == nil {
- return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
}
- type Plain SpecKubernetesNodePoolAmi
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesNodePoolAmi(plain)
+ if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
+ }
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain)
return nil
}
+type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string
+
+var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{
+ "ingress",
+ "egress",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
+ for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v)
}
- *j = SpecDistributionModulesIngressNginxTLSProvider(v)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v)
return nil
}
-var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{
- "docker",
- "containerd",
+const (
+ SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress"
+ SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress"
+)
+
+type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct {
+ // The name of the Firewall rule.
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // Ports corresponds to the JSON schema field "ports".
+ Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
+
+ // The protocol of the Firewall rule.
+ Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
+
+ // If `true`, the source will be the security group itself.
+ Self bool `json:"self" yaml:"self" mapstructure:"self"`
+
+ // Additional AWS tags for the Firewall rule.
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
+
+ // The type of the Firewall rule; it can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
+ Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ if v, ok := raw["ports"]; !ok || v == nil {
+ return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ if v, ok := raw["protocol"]; !ok || v == nil {
+ return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ if v, ok := raw["self"]; !ok || v == nil {
+ return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain)
+ return nil
+}
+
+type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string
+
+var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{
+ "ingress",
+ "egress",
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime {
+ for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v)
}
- *j = SpecKubernetesNodePoolContainerRuntime(v)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v)
return nil
}
+const (
+ SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress"
+ SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress"
+)
+
+type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct {
+ // The name for the additional Firewall rule Security Group.
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // Ports corresponds to the JSON schema field "ports".
+ Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
+
+ // The protocol of the Firewall rule.
+ Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
+
+ // The source security group ID.
+ SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"`
+
+ // Additional AWS tags for the Firewall rule.
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
+
+ // The type of the Firewall rule; it can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
+ Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["ca"]; !ok || v == nil {
- return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
}
- if v, ok := raw["cert"]; !ok || v == nil {
- return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["ports"]; !ok || v == nil {
+ return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
}
- if v, ok := raw["key"]; !ok || v == nil {
- return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["protocol"]; !ok || v == nil {
+ return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
}
- type Plain SpecDistributionModulesIngressNginxTLSSecret
+ if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil {
+ return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ }
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain)
return nil
}
+// Optional additional firewall rules that will be attached to the nodes.
+type SpecKubernetesNodePoolAdditionalFirewallRules struct {
+ // The CIDR blocks objects definition for the Firewall rule. Even though it is a
+ // list, only one item is currently supported. See
+ // https://github.com/sighupio/fury-eks-installer/issues/46 for more details.
+ CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"`
+
+ // Self corresponds to the JSON schema field "self".
+ Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"`
+
+ // SourceSecurityGroupId corresponds to the JSON schema field
+ // "sourceSecurityGroupId".
+ SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
- }
- type Plain SpecDistributionModulesIngressNginxTLS
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRules
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginxTLS(plain)
+ if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
+ }
+ if len(plain.CidrBlocks) > 1 {
+ return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1)
+ }
+ if plain.Self != nil && len(plain.Self) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "self", 1)
+ }
+ if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1)
+ }
+ *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain)
return nil
}
-var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
- "none",
- "single",
- "dual",
-}
+type SpecKubernetesNodePoolAmiType string
-var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{
- "gp2",
- "gp3",
- "io1",
- "standard",
+var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{
+ "alinux2",
+ "alinux2023",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType {
+ for _, expected := range enumValues_SpecKubernetesNodePoolAmiType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v)
}
- *j = SpecKubernetesNodePoolInstanceVolumeType(v)
+ *j = SpecKubernetesNodePoolAmiType(v)
return nil
}
+const (
+ SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2"
+ SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023"
+)
+
+// Configuration for customizing the Amazon Machine Image (AMI) for the machines
+// of the Node Pool.
+//
+// The AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields
+// to use a custom AMI (only with the `self-managed` node pool type) or by setting
+// the `ami.type` field to one of the official AMIs based on Amazon Linux.
+type SpecKubernetesNodePoolAmi struct {
+ // The ID of the AMI to use for the nodes; it must be set together with the
+ // `owner` field. `ami.id` and `ami.owner` can only be set when the Node Pool
+ // type is `self-managed` and they can't be set at the same time as `ami.type`.
+ Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"`
+
+ // The owner of the AMI to use for the nodes; it must be set together with the
+ // `id` field. `ami.id` and `ami.owner` can only be set when the Node Pool
+ // type is `self-managed` and they can't be set at the same time as `ami.type`.
+ Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"`
+
+ // The AMI type defines the AMI to use for the `eks-managed` and `self-managed`
+ // types of Node Pools. Only Amazon Linux based AMIs are supported. It can't be
+ // set at the same time as `ami.id` and `ami.owner`.
+ Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
+}
+
+type SpecKubernetesNodePoolContainerRuntime string
+
+var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{
+ "docker",
+ "containerd",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
+ for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v)
}
- *j = SpecDistributionModulesMonitoringMimirBackend(v)
+ *j = SpecKubernetesNodePoolContainerRuntime(v)
return nil
}
-var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
- "minio",
- "externalEndpoint",
+const (
+ SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker"
+ SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd"
+)
+
+type SpecKubernetesNodePoolInstanceVolumeType string
+
+var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{
+ "gp2",
+ "gp3",
+ "io1",
+ "standard",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v)
}
- *j = SpecDistributionModulesIngressNginxType(v)
+ *j = SpecKubernetesNodePoolInstanceVolumeType(v)
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["ports"]; !ok || v == nil {
- return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["protocol"]; !ok || v == nil {
- return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["self"]; !ok || v == nil {
- return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain)
- return nil
-}
+const (
+ SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2"
+ SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3"
+ SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1"
+ SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard"
+)
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["baseDomain"]; !ok || v == nil {
- return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
- }
- if v, ok := raw["certManager"]; !ok || v == nil {
- return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required")
- }
- if v, ok := raw["externalDns"]; !ok || v == nil {
- return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required")
- }
- if v, ok := raw["nginx"]; !ok || v == nil {
- return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
- }
- type Plain SpecDistributionModulesIngress
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngress(plain)
- return nil
+// Configuration for the instances that will be used in the node pool.
+type SpecKubernetesNodePoolInstance struct {
+ // Set the maximum pods per node to a custom value. If not set will use EKS
+ // default value that depends on the instance type.
+ //
+ // Ref:
+ // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt
+ MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"`
+
+ // If `true`, the nodes will be created as spot instances. Default is `false`.
+ Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"`
+
+ // The instance type to use for the nodes.
+ Type string `json:"type" yaml:"type" mapstructure:"type"`
+
+ // The size of the disk in GB.
+ VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"`
+
+ // Volume type for the instance disk. Default is `gp2`.
+ VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -3423,43 +3787,12 @@ func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error {
type TypesKubeLabels_1 map[string]string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["audit"]; !ok || v == nil {
- return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["errors"]; !ok || v == nil {
- return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["events"]; !ok || v == nil {
- return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["infra"]; !ok || v == nil {
- return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["ingressNginx"]; !ok || v == nil {
- return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["kubernetes"]; !ok || v == nil {
- return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdCommon"]; !ok || v == nil {
- return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdEtcd"]; !ok || v == nil {
- return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- type Plain SpecDistributionModulesLoggingCustomOutputs
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesLoggingCustomOutputs(plain)
- return nil
+type SpecKubernetesNodePoolSize struct {
+ // The maximum number of nodes in the node pool.
+ Max int `json:"max" yaml:"max" mapstructure:"max"`
+
+ // The minimum number of nodes in the node pool.
+ Min int `json:"min" yaml:"min" mapstructure:"min"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -3487,10 +3820,7 @@ type TypesAwsSubnetId string
type TypesKubeTaints []string
-var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{
- "minio",
- "externalEndpoint",
-}
+type SpecKubernetesNodePoolType string
var enumValues_SpecKubernetesNodePoolType = []interface{}{
"eks-managed",
@@ -3510,69 +3840,63 @@ func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error {
break
}
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v)
- }
- *j = SpecKubernetesNodePoolType(v)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
- }
- type Plain SpecDistributionModulesLogging
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesLogging(plain)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
- }
- *j = SpecDistributionModulesLoggingLokiBackend(v)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
- }
- type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v)
+ }
+ *j = SpecKubernetesNodePoolType(v)
return nil
}
+const (
+ SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed"
+ SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed"
+)
+
+// Array with all the node pool definitions that will join the cluster. Each item
+// is an object.
+type SpecKubernetesNodePool struct {
+ // AdditionalFirewallRules corresponds to the JSON schema field
+ // "additionalFirewallRules".
+ AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"`
+
+ // Ami corresponds to the JSON schema field "ami".
+ Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"`
+
+ // This optional array defines additional target groups to attach to the instances
+ // in the node pool.
+ AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"`
+
+ // The container runtime to use in the nodes of the node pool. Default is
+ // `containerd`.
+ ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"`
+
+ // Instance corresponds to the JSON schema field "instance".
+ Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"`
+
+ // Kubernetes labels that will be added to the nodes.
+ Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
+
+ // The name of the node pool.
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // Size corresponds to the JSON schema field "size".
+ Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"`
+
+ // Optional list of subnet IDs where to create the nodes.
+ SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
+
+ // AWS tags that will be added to the ASG and EC2 instances.
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
+
+ // Kubernetes taints that will be added to the nodes.
+ Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"`
+
+ // The type of Node Pool, can be `self-managed` for using customization like
+ // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from
+ // Amazon via the `ami.type` field. It is recommended to use `self-managed`.
+ Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -3588,6 +3912,9 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error {
if v, ok := raw["size"]; !ok || v == nil {
return fmt.Errorf("field size in SpecKubernetesNodePool: required")
}
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePool: required")
+ }
type Plain SpecKubernetesNodePool
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
@@ -3597,13 +3924,7 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error {
return nil
}
-type TypesKubeResourcesLimits struct {
- // The cpu limit for the opensearch pods
- Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
-
- // The memory limit for the opensearch pods
- Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
-}
+type SpecKubernetesNodePoolsLaunchKind string
var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{
"launch_configurations",
@@ -3631,28 +3952,66 @@ func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error {
return nil
}
-type TypesKubeResourcesRequests struct {
- // The cpu request for the prometheus pods
- Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+const (
+ SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations"
+ SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates"
+ SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both"
+)
- // The memory request for the opensearch pods
- Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
-}
+// Defines the Kubernetes components configuration and the values needed for the
+// `kubernetes` phase of furyctl.
+type SpecKubernetes struct {
+ // ApiServer corresponds to the JSON schema field "apiServer".
+ ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"`
-type TypesKubeResources struct {
- // Limits corresponds to the JSON schema field "limits".
- Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+ // AwsAuth corresponds to the JSON schema field "awsAuth".
+ AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"`
- // Requests corresponds to the JSON schema field "requests".
- Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
-}
+ // Overrides the default prefix for the IAM role name of the EKS cluster. If not
+ // set, a name will be generated from the cluster name.
+ ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`
-var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{
- "single",
- "triple",
-}
+ // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days.
+ // Setting the value to zero (`0`) makes retention last forever. Default is `90`
+ // days.
+ LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
-type TypesKubeLabels map[string]string
+ // Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
+ LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`
+
+ // The SSH public key that can connect to the nodes via SSH using the `ec2-user`
+	// user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
+ NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`
+
+ // Global default AMI type used for EKS worker nodes. This will apply to all node
+ // pools unless overridden by a specific node pool.
+ NodePoolGlobalAmiType SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType" yaml:"nodePoolGlobalAmiType" mapstructure:"nodePoolGlobalAmiType"`
+
+ // NodePools corresponds to the JSON schema field "nodePools".
+ NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`
+
+ // Accepted values are `launch_configurations`, `launch_templates` or `both`. For
+ // new clusters use `launch_templates`, for adopting an existing cluster you'll
+ // need to migrate from `launch_configurations` to `launch_templates` using `both`
+ // as interim.
+ NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"`
+
+ // This value defines the network CIDR that will be used to assign IP addresses to
+ // Kubernetes services.
+ ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"`
+
+	// Required only if `.spec.infrastructure.vpc` is omitted. This value defines
+	// the IDs of the subnets where the EKS cluster will be created.
+ SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
+
+ // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+ // ID of the VPC where the EKS cluster and its related resources will be created.
+ VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
+
+ // Overrides the default prefix for the IAM role name of the EKS workers. If not
+ // set, a name will be generated from the cluster name.
+ WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"`
+}
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
@@ -3666,6 +4025,9 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil {
return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required")
}
+ if v, ok := raw["nodePoolGlobalAmiType"]; !ok || v == nil {
+ return fmt.Errorf("field nodePoolGlobalAmiType in SpecKubernetes: required")
+ }
if v, ok := raw["nodePools"]; !ok || v == nil {
return fmt.Errorf("field nodePools in SpecKubernetes: required")
}
@@ -3681,22 +4043,12 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
- }
- type Plain SpecDistributionModulesLoggingOpensearch
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesLoggingOpensearch(plain)
- return nil
+type SpecPluginsHelmReleasesElemSetElem struct {
+ // The name of the set
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // The value of the set
+ Value string `json:"value" yaml:"value" mapstructure:"value"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -3720,24 +4072,28 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
- }
- *j = SpecDistributionModulesLoggingType(v)
- return nil
+type SpecPluginsHelmReleases []struct {
+ // The chart of the release
+ Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
+
+ // Disable running `helm diff` validation when installing the plugin, it will
+ // still be done when upgrading.
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"`
+
+ // The name of the release
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // The namespace of the release
+ Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"`
+
+ // Set corresponds to the JSON schema field "set".
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"`
+
+ // The values of the release
+ Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"`
+
+ // The version of the release
+ Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"`
}
type SpecPluginsHelmRepositories []struct {
@@ -3748,11 +4104,12 @@ type SpecPluginsHelmRepositories []struct {
Url string `json:"url" yaml:"url" mapstructure:"url"`
}
-var enumValues_SpecDistributionModulesLoggingType = []interface{}{
- "none",
- "opensearch",
- "loki",
- "customOutputs",
+type SpecPluginsHelm struct {
+ // Releases corresponds to the JSON schema field "releases".
+ Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"`
+
+ // Repositories corresponds to the JSON schema field "repositories".
+ Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"`
}
type SpecPluginsKustomize []struct {
@@ -3763,50 +4120,31 @@ type SpecPluginsKustomize []struct {
Name string `json:"name" yaml:"name" mapstructure:"name"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
- }
- *j = SpecDistributionModulesLoggingOpensearchType(v)
- return nil
+type SpecPlugins struct {
+ // Helm corresponds to the JSON schema field "helm".
+ Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"`
+
+ // Kustomize corresponds to the JSON schema field "kustomize".
+ Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"`
}
type TypesAwsS3KeyPrefix string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["email"]; !ok || v == nil {
- return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- if v, ok := raw["route53"]; !ok || v == nil {
- return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
- return nil
+// Configuration for the S3 bucket used to store the Terraform state.
+type SpecToolsConfigurationTerraformStateS3 struct {
+ // This value defines which bucket will be used to store all the states.
+ BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"`
+
+ // This value defines which folder will be used to store all the states inside the
+ // bucket.
+ KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"`
+
+ // This value defines in which region the bucket is located.
+ Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
+
+ // This value defines if the region of the bucket should be validated or not by
+ // Terraform, useful when using a bucket in a recently added region.
+ SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -3833,6 +4171,7 @@ func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error {
return nil
}
+// Configuration for storing the Terraform state of the cluster.
type SpecToolsConfigurationTerraformState struct {
// S3 corresponds to the JSON schema field "s3".
S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"`
@@ -3902,24 +4241,34 @@ func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v)
- }
- *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v)
- return nil
+type Spec struct {
+ // Distribution corresponds to the JSON schema field "distribution".
+ Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"`
+
+ // Defines which KFD version will be installed and, in consequence, the Kubernetes
+ // version used to create the cluster. It supports git tags and branches. Example:
+ // `v1.30.1`.
+ DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`
+
+ // Infrastructure corresponds to the JSON schema field "infrastructure".
+ Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"`
+
+ // Kubernetes corresponds to the JSON schema field "kubernetes".
+ Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"`
+
+ // Plugins corresponds to the JSON schema field "plugins".
+ Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"`
+
+ // Defines in which AWS region the cluster and all the related resources will be
+ // created.
+ Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
+
+ // This map defines which will be the common tags that will be added to all the
+ // resources created on AWS.
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
+
+ // Configuration for tools used by furyctl, like Terraform.
+ ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -3955,103 +4304,99 @@ func (j *Spec) UnmarshalJSON(b []byte) error {
return nil
}
-var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{
- "create",
- "replace",
- "merge",
-}
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["effect"]; !ok || v == nil {
- return fmt.Errorf("field effect in TypesKubeToleration: required")
- }
- if v, ok := raw["key"]; !ok || v == nil {
- return fmt.Errorf("field key in TypesKubeToleration: required")
- }
- type Plain TypesKubeToleration
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = TypesKubeToleration(plain)
- return nil
-}
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{}
-type TypesKubeToleration struct {
- // Effect corresponds to the JSON schema field "effect".
- Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"`
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{}
- // The key of the toleration
- Key string `json:"key" yaml:"key" mapstructure:"key"`
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{}
- // Operator corresponds to the JSON schema field "operator".
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{}
- // The value of the toleration
- Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"`
-}
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{}
-const (
- TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal"
- TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists"
-)
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_TypesKubeTolerationOperator {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v)
- }
- *j = TypesKubeTolerationOperator(v)
- return nil
-}
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{}
-var enumValues_TypesKubeTolerationOperator = []interface{}{
- "Exists",
- "Equal",
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{}
+
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{}
+
+// Override the default routes for KFD components.
+type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct {
+ // GatekeeperPolicyManager corresponds to the JSON schema field
+ // "gatekeeperPolicyManager".
+ GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"`
+
+ // HubbleUi corresponds to the JSON schema field "hubbleUi".
+ HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"`
+
+ // IngressNgnixForecastle corresponds to the JSON schema field
+ // "ingressNgnixForecastle".
+ IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"`
+
+ // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole".
+ LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"`
+
+ // LoggingOpensearchDashboards corresponds to the JSON schema field
+ // "loggingOpensearchDashboards".
+ LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"`
+
+ // MonitoringAlertmanager corresponds to the JSON schema field
+ // "monitoringAlertmanager".
+ MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"`
+
+ // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana".
+ MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"`
+
+ // MonitoringMinioConsole corresponds to the JSON schema field
+ // "monitoringMinioConsole".
+ MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"`
+
+ // MonitoringPrometheus corresponds to the JSON schema field
+ // "monitoringPrometheus".
+ MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"`
+
+ // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole".
+ TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"`
}
-type TypesKubeTolerationOperator string
+// Pomerium needs some user-provided secrets to be fully configured. These secrets
+// should be unique between clusters.
+type SpecDistributionModulesAuthPomeriumSecrets struct {
+ // Cookie Secret is the secret used to encrypt and sign session cookies.
+ //
+ // To generate a random key, run the following command: `head -c32 /dev/urandom |
+ // base64`
+ COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"`
-const (
- TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute"
- TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule"
- TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule"
-)
+ // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth
+ // type is SSO, this value will be the secret used to authenticate Pomerium with
+ // Dex, **use a strong random value**.
+ IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"`
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_TypesKubeTolerationEffect {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v)
- }
- *j = TypesKubeTolerationEffect(v)
- return nil
+ // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate
+ // requests between Pomerium services. It's critical that secret keys are random,
+ // and stored safely.
+ //
+ // To generate a key, run the following command: `head -c32 /dev/urandom | base64`
+ SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"`
+
+ // Signing Key is the base64 representation of one or more PEM-encoded private
+ // keys used to sign a user's attestation JWT, which can be consumed by upstream
+ // applications to pass along identifying user information like username, id, and
+ // groups.
+ //
+ // To generate a P-256 (ES256) signing key:
+ //
+ // ```bash
+ // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem
+ // # careful! this will output your private key in terminal
+ // cat ec_private.pem | base64
+ // ```
+ SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -4111,13 +4456,8 @@ func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error {
return nil
}
-var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
- "create",
- "replace",
- "merge",
-}
-
const (
+ TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule"
TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule"
TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute"
)
@@ -4200,13 +4540,26 @@ type TypesFuryModuleComponentOverrides_1 struct {
Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
-var enumValues_TypesKubeTolerationEffect = []interface{}{
- "NoSchedule",
- "PreferNoSchedule",
- "NoExecute",
-}
+type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{}
-type TypesKubeTolerationEffect string
+// Configuration for Pomerium, an identity-aware reverse proxy used for SSO.
+type SpecDistributionModulesAuthPomerium_2 struct {
+ // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy".
+ DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // DEPRECATED: Use defaultRoutesPolicy and/or routes
+ Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"`
+
+ // Additional routes configuration for Pomerium. Follows Pomerium's route format:
+ // https://www.pomerium.com/docs/reference/routes
+ Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"`
+
+ // Secrets corresponds to the JSON schema field "secrets".
+ Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"`
+}
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error {
@@ -4240,23 +4593,7 @@ type TypesSshPubKey string
type TypesUri string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionCommonProvider: required")
- }
- type Plain SpecDistributionCommonProvider
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionCommonProvider(plain)
- return nil
-}
+type EksclusterKfdV1Alpha2Kind string
var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{
"EKSCluster",
@@ -4282,30 +4619,21 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error {
return nil
}
-type TypesKubeNodeSelector map[string]string
+const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster"
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *Metadata) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in Metadata: required")
- }
- type Plain Metadata
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- if len(plain.Name) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "name", 1)
- }
- if len(plain.Name) > 56 {
- return fmt.Errorf("field %s length: must be <= %d", "name", 56)
- }
- *j = Metadata(plain)
- return nil
+// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).
+type EksclusterKfdV1Alpha2 struct {
+ // ApiVersion corresponds to the JSON schema field "apiVersion".
+ ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"`
+
+ // Kind corresponds to the JSON schema field "kind".
+ Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"`
+
+ // Metadata corresponds to the JSON schema field "metadata".
+ Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"`
+
+ // Spec corresponds to the JSON schema field "spec".
+ Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"`
}
// UnmarshalJSON implements json.Unmarshaler.
diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go
index 76fada170..f63a2cc65 100644
--- a/pkg/apis/ekscluster/v1alpha2/public/schema.go
+++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go
@@ -6,9 +6,11 @@ import (
"encoding/json"
"fmt"
"reflect"
+
+ "github.com/sighupio/go-jsonschema/pkg/types"
)
-// A Fury Cluster deployed through AWS's Elastic Kubernetes Service
+// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).
type EksclusterKfdV1Alpha2 struct {
// ApiVersion corresponds to the JSON schema field "apiVersion".
ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"`
@@ -28,7 +30,8 @@ type EksclusterKfdV1Alpha2Kind string
const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster"
type Metadata struct {
- // Name corresponds to the JSON schema field "name".
+ // The name of the cluster. It will also be used as a prefix for all the other
+ // resources created.
Name string `json:"name" yaml:"name" mapstructure:"name"`
}
@@ -36,7 +39,9 @@ type Spec struct {
// Distribution corresponds to the JSON schema field "distribution".
Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"`
- // DistributionVersion corresponds to the JSON schema field "distributionVersion".
+ // Defines which KFD version will be installed and, in consequence, the Kubernetes
+ // version used to create the cluster. It supports git tags and branches. Example:
+ // `v1.30.1`.
DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`
// Infrastructure corresponds to the JSON schema field "infrastructure".
@@ -48,14 +53,15 @@ type Spec struct {
// Plugins corresponds to the JSON schema field "plugins".
Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"`
- // Region corresponds to the JSON schema field "region".
+ // Defines in which AWS region the cluster and all the related resources will be
+ // created.
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
// This map defines which will be the common tags that will be added to all the
// resources created on AWS.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
- // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration".
+ // Configuration for tools used by furyctl, like Terraform.
ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"`
}
@@ -70,29 +76,38 @@ type SpecDistribution struct {
Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"`
}
+// Common configuration for all the distribution modules.
type SpecDistributionCommon struct {
- // The node selector to use to place the pods for all the KFD modules
+ // The node selector to use to place the pods for all the KFD modules. Follows
+ // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
// Provider corresponds to the JSON schema field "provider".
Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"`
// URL of the registry where to pull images from for the Distribution phase.
- // (Default is registry.sighup.io/fury).
+ // (Default is `registry.sighup.io/fury`).
//
// NOTE: If plugins are pulling from the default registry, the registry will be
- // replaced for these plugins too.
+ // replaced for the plugin too.
Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
- // The relative path to the vendor directory, does not need to be changed
+ // The relative path to the vendor directory, does not need to be changed.
RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"`
- // The tolerations that will be added to the pods for all the KFD modules
+ // An array with the tolerations that will be added to the pods for all the KFD
+ // modules. Follows Kubernetes tolerations format. Example:
+ //
+ // ```yaml
+ // - effect: NoSchedule
+ // key: node.kubernetes.io/role
+ // value: infra
+ // ```
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
type SpecDistributionCommonProvider struct {
- // The type of the provider, must be EKS if specified
+ // The provider type. Don't set. FOR INTERNAL USE ONLY.
Type string `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -294,8 +309,11 @@ type SpecDistributionModules struct {
Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
}
+// Configuration for the Auth module.
type SpecDistributionModulesAuth struct {
- // The base domain for the auth module
+ // The base domain for the ingresses created by the Auth module (Gangplank,
+ // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+ // use the `external` ingress class.
BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
// Dex corresponds to the JSON schema field "dex".
@@ -311,11 +329,25 @@ type SpecDistributionModulesAuth struct {
Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
}
+// Configuration for the Dex package.
type SpecDistributionModulesAuthDex struct {
- // The additional static clients for dex
+ // Additional static clients definitions that will be added to the default clients
+ // included with the distribution in Dex's configuration. Example:
+ //
+ // ```yaml
+ // additionalStaticClients:
+ // - id: my-custom-client
+ // name: "A custom additional static client"
+ // redirectURIs:
+ // - "https://myapp.tld/redirect"
+ // - "https://alias.tld/oidc-callback"
+ // secret: supersecretpassword
+ // ```
+ // Reference: https://dexidp.io/docs/connectors/local/
AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
- // The connectors for dex
+ // A list with each item defining a Dex connector. Follows Dex connectors
+ // configuration format: https://dexidp.io/docs/connectors/
Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
// Expiry corresponds to the JSON schema field "expiry".
@@ -333,25 +365,29 @@ type SpecDistributionModulesAuthDexExpiry struct {
SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
}
+// Override the common configuration with a particular configuration for the Auth
+// module.
type SpecDistributionModulesAuthOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
+ // Override the definition of the Auth module ingresses.
Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
- // The node selector to use to place the pods for the auth module
+ // Set to override the node selector used to place the pods of the Auth module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // The tolerations that will be added to the pods for the auth module
+ // Set to override the tolerations that will be added to the pods of the Auth
+ // module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
type SpecDistributionModulesAuthOverridesIngress struct {
- // The host of the ingress
+ // Use this host for the ingress instead of the default one.
Host string `json:"host" yaml:"host" mapstructure:"host"`
- // The ingress class of the ingress
+ // Use this ingress class for the ingress instead of the default one.
IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"`
}
+// Override the definition of the Auth module ingresses.
type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress
type SpecDistributionModulesAuthPomerium interface{}
@@ -476,15 +512,23 @@ type SpecDistributionModulesAuthProvider struct {
// BasicAuth corresponds to the JSON schema field "basicAuth".
BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"`
- // The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
+ // The type of the Auth provider, options are:
+ // - `none`: will disable authentication in the infrastructural ingresses.
+ // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO)
+ // and require authentication before accessing them.
+ // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth
+ // (username and password) authentication.
+ //
+ // Default is `none`.
Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"`
}
+// Configuration for the HTTP Basic Auth provider.
type SpecDistributionModulesAuthProviderBasicAuth struct {
- // The password for the basic auth
+ // The password for logging in with the HTTP basic authentication.
Password string `json:"password" yaml:"password" mapstructure:"password"`
- // The username for the basic auth
+ // The username for logging in with the HTTP basic authentication.
Username string `json:"username" yaml:"username" mapstructure:"username"`
}
@@ -535,11 +579,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct {
Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the Disaster Recovery module.
type SpecDistributionModulesDr struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The type of the DR, must be ***none*** or ***eks***
+ // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the
+ // module and `eks` will install Velero and use an S3 bucket to store the
+ // backups.
+ //
+ // Default is `none`.
Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"`
// Velero corresponds to the JSON schema field "velero".
@@ -565,21 +614,44 @@ type SpecDistributionModulesDrVelero struct {
}
type SpecDistributionModulesDrVeleroEks struct {
- // The name of the velero bucket
+ // The name of the bucket for Velero.
BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"`
- // The region where the velero bucket is located
+ // The region where the bucket for Velero will be located.
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
}
// Configuration for Velero's backup schedules.
type SpecDistributionModulesDrVeleroSchedules struct {
- // Configuration for Velero's schedules cron.
- Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"`
+ // Configuration for Velero schedules.
+ Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
// Whether to install or not the default `manifests` and `full` backups schedules.
// Default is `true`.
Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
+// Configuration for Velero schedules.
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+ // Configuration for Velero's full backup schedule.
+ Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
+
+ // Configuration for Velero's manifests backup schedule.
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
+
+// Configuration for Velero's full backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct {
+ // The cron expression for the `full` backup schedule (default `0 1 * * *`).
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // EXPERIMENTAL (if you do more than one backup, the following backups after the
+ // first are not automatically restorable, see
+ // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for
+ // the manual restore solution): SnapshotMoveData specifies whether snapshot data
+ // should be moved. Velero will create a new volume from the snapshot and upload
+ // the content to the storageLocation.
+ SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"`
// The Time To Live (TTL) of the backups created by the backup schedules (default
// `720h0m0s`, 30 days). Notice that changing this value will affect only newly
@@ -587,23 +659,28 @@ type SpecDistributionModulesDrVeleroSchedules struct {
Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
}
-// Configuration for Velero's schedules cron.
-type SpecDistributionModulesDrVeleroSchedulesCron struct {
- // The cron expression for the `full` backup schedule (default `0 1 * * *`).
- Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
-
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct {
// The cron expression for the `manifests` backup schedule (default `*/15 * * *
// *`).
- Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // The Time To Live (TTL) of the backups created by the backup schedules (default
+ // `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+ // created backups, prior backups will keep the old TTL.
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
}
type SpecDistributionModulesIngress struct {
- // the base domain used for all the KFD ingresses, if in the nginx dual
- // configuration, it should be the same as the
- // .spec.distribution.modules.ingress.dns.private.name zone
+ // The base domain used for all the KFD infrastructural ingresses. If in the nginx
+ // `dual` configuration type, this value should be the same as the
+ // `.spec.distribution.modules.ingress.dns.private.name` zone.
BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`
- // CertManager corresponds to the JSON schema field "certManager".
+ // Configuration for the cert-manager package. Required even if
+ // `ingress.nginx.type` is `none`, cert-manager is used for managing other
+ // certificates in the cluster besides the TLS termination certificates for the
+ // ingresses.
CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"`
// Dns corresponds to the JSON schema field "dns".
@@ -612,13 +689,17 @@ type SpecDistributionModulesIngress struct {
// Forecastle corresponds to the JSON schema field "forecastle".
Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
- // Configurations for the nginx ingress controller module
+ // Configurations for the Ingress nginx controller package.
Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`
// Overrides corresponds to the JSON schema field "overrides".
Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the cert-manager package. Required even if
+// `ingress.nginx.type` is `none`, cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
type SpecDistributionModulesIngressCertManager struct {
// ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
@@ -627,17 +708,23 @@ type SpecDistributionModulesIngressCertManager struct {
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the cert-manager's ACME clusterIssuer used to request
+// certificates from Let's Encrypt.
type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
- // The email of the cluster issuer
+ // The email address to use during the certificate issuing process.
Email string `json:"email" yaml:"email" mapstructure:"email"`
- // The name of the cluster issuer
+ // The name of the clusterIssuer.
Name string `json:"name" yaml:"name" mapstructure:"name"`
- // The custom solvers configurations
+ // The list of challenge solvers to use instead of the default one for the
+ // `http01` challenge. Check [cert manager's
+ // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+ // for examples for this field.
Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
- // The type of the cluster issuer, must be ***dns01*** or ***http01***
+ // The type of the clusterIssuer, must be `dns01` for using DNS challenge or
+ // `http01` for using HTTP challenge.
Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}
@@ -648,6 +735,8 @@ const (
SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01"
)
+// DNS definition, used in conjunction with `externalDNS` package to automate DNS
+// management and certificates emission.
type SpecDistributionModulesIngressDNS struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -659,19 +748,23 @@ type SpecDistributionModulesIngressDNS struct {
Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"`
}
+// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for
+// exposing infrastructural services only in the private DNS zone.
type SpecDistributionModulesIngressDNSPrivate struct {
- // If true, the private hosted zone will be created
+ // By default, a Terraform data source will be used to get the private DNS zone.
+ // Set to `true` to create the private zone instead.
Create bool `json:"create" yaml:"create" mapstructure:"create"`
- // The name of the private hosted zone
+ // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`.
Name string `json:"name" yaml:"name" mapstructure:"name"`
}
type SpecDistributionModulesIngressDNSPublic struct {
- // If true, the public hosted zone will be created
+ // By default, a Terraform data source will be used to get the public DNS zone.
+ // Set to `true` to create the public zone instead.
Create bool `json:"create" yaml:"create" mapstructure:"create"`
- // The name of the public hosted zone
+ // The name of the public hosted zone.
Name string `json:"name" yaml:"name" mapstructure:"name"`
}
@@ -687,14 +780,24 @@ type SpecDistributionModulesIngressNginx struct {
// Tls corresponds to the JSON schema field "tls".
Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
- // The type of the nginx ingress controller, must be ***none***, ***single*** or
- // ***dual***
+ // The type of the Ingress nginx controller, options are:
+ // - `none`: no ingress controller will be installed and no infrastructural
+ // ingresses will be created.
+ // - `single`: a single ingress controller with ingress class `nginx` will be
+ // installed to manage all the ingress resources, infrastructural ingresses will
+ // be created.
+ // - `dual`: two independent ingress controllers will be installed, one for the
+ // `internal` ingress class intended for private ingresses and one for the
+ // `external` ingress class intended for public ingresses. KFD infrastructural
+ // ingresses will use the `internal` ingress class when using the dual type.
+ //
+ // Default is `single`.
Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}
type SpecDistributionModulesIngressNginxTLS struct {
- // The provider of the TLS certificate, must be ***none***, ***certManager*** or
- // ***secret***
+ // The provider of the TLS certificates for the ingresses, one of: `none`,
+ // `certManager`, or `secret`.
Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
// Secret corresponds to the JSON schema field "secret".
@@ -709,15 +812,18 @@ const (
SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
)
+// Kubernetes TLS secret for the ingresses TLS certificate.
type SpecDistributionModulesIngressNginxTLSSecret struct {
- // Ca corresponds to the JSON schema field "ca".
+ // The Certificate Authority certificate file's content. You can use the
+ // `"{file://}"` notation to get the content from a file.
Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
- // The certificate file content or you can use the file notation to get the
- // content from a file
+ // The certificate file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
- // Key corresponds to the JSON schema field "key".
+ // The signing key file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
Key string `json:"key" yaml:"key" mapstructure:"key"`
}
@@ -729,14 +835,17 @@ const (
SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
)
+// Override the common configuration with a particular configuration for the
+// Ingress module.
type SpecDistributionModulesIngressOverrides struct {
// Ingresses corresponds to the JSON schema field "ingresses".
Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
- // The node selector to use to place the pods for the ingress module
+ // Set to override the node selector used to place the pods of the Ingress module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // The tolerations that will be added to the pods for the ingress module
+ // Set to override the tolerations that will be added to the pods of the Ingress
+ // module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
@@ -745,6 +854,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
}
+// Configuration for the Logging module.
type SpecDistributionModulesLogging struct {
// Cerebro corresponds to the JSON schema field "cerebro".
Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -767,83 +877,104 @@ type SpecDistributionModulesLogging struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // selects the logging stack. Choosing none will disable the centralized logging.
- // Choosing opensearch will deploy and configure the Logging Operator and an
+ // Selects the logging stack. Options are:
+ // - `none`: will disable the centralized logging.
+ // - `opensearch`: will deploy and configure the Logging Operator and an
// OpenSearch cluster (can be single or triple for HA) where the logs will be
- // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
- // for storage. Choosing customOuput the Logging Operator will be deployed and
- // installed but with no local storage, you will have to create the needed Outputs
- // and ClusterOutputs to ship the logs to your desired storage.
+ // stored.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+ // storage.
+ // - `customOutputs`: the Logging Operator will be deployed and installed but
+ // without in-cluster storage, you will have to create the needed Outputs and
+ // ClusterOutputs to ship the logs to your desired storage.
+ //
+ // Default is `opensearch`.
Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
}
+// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
type SpecDistributionModulesLoggingCerebro struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
type SpecDistributionModulesLoggingCustomOutputs struct {
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `audit` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `errors` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `events` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Events string `json:"events" yaml:"events" mapstructure:"events"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `infra` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Infra string `json:"infra" yaml:"infra" mapstructure:"infra"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `ingressNginx` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `kubernetes` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `systemdCommon` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `systemdEtcd` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"`
}
+// Configuration for the Loki package.
type SpecDistributionModulesLoggingLoki struct {
- // Backend corresponds to the JSON schema field "backend".
+ // The storage backend type for Loki. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external object storage instead of deploying an in-cluster MinIO.
Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+ // Configuration for Loki's external storage backend.
ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+ // time series database from BoltDB to TSDB and the schema from v11 to v13 that it
+ // uses to store the logs.
+ //
+ // The value of this field will determine the date when Loki will start writing
+ // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+ // and schema will be kept until they expire for reading purposes.
+ //
+ // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+ // `2024-11-18`.
+ TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
}
type SpecDistributionModulesLoggingLokiBackend string
@@ -853,23 +984,25 @@ const (
SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio"
)
+// Configuration for Loki's external storage backend.
type SpecDistributionModulesLoggingLokiExternalEndpoint struct {
- // The access key id of the loki external endpoint
+ // The access key ID (username) for the external S3-compatible bucket.
AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
- // The bucket name of the loki external endpoint
+ // The bucket name of the external S3-compatible object storage.
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
- // The endpoint of the loki external endpoint
+ // External S3-compatible endpoint for Loki's storage.
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
- // If true, the loki external endpoint will be insecure
+ // If true, will use HTTP as protocol instead of HTTPS.
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
- // The secret access key of the loki external endpoint
+ // The secret access key (password) for the external S3-compatible bucket.
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
+// Configuration for Logging's MinIO deployment.
type SpecDistributionModulesLoggingMinio struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -877,15 +1010,15 @@ type SpecDistributionModulesLoggingMinio struct {
// RootUser corresponds to the JSON schema field "rootUser".
RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
- // The PVC size for each minio disk, 6 disks total
+ // The PVC size for each MinIO disk, 6 disks total.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
type SpecDistributionModulesLoggingMinioRootUser struct {
- // The password of the minio root user
+ // The password for the default MinIO root user.
Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
- // The username of the minio root user
+ // The username for the default MinIO root user.
Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
}
@@ -896,10 +1029,12 @@ type SpecDistributionModulesLoggingOpensearch struct {
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
- // The storage size for the opensearch pods
+ // The storage size for the OpenSearch volumes. Follows Kubernetes resources
+ // storage requests. Default is `150Gi`.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
- // The type of the opensearch, must be ***single*** or ***triple***
+ // The type of OpenSearch deployment. One of: `single` for a single replica or
+ // `triple` for an HA 3-replicas deployment.
Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -910,6 +1045,7 @@ const (
SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple"
)
+// Configuration for the Logging Operator.
type SpecDistributionModulesLoggingOperator struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -924,7 +1060,7 @@ const (
SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch"
)
-// configuration for the Monitoring module components
+// Configuration for the Monitoring module.
type SpecDistributionModulesMonitoring struct {
// Alertmanager corresponds to the JSON schema field "alertmanager".
Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"`
@@ -953,22 +1089,23 @@ type SpecDistributionModulesMonitoring struct {
// PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
- // The type of the monitoring, must be ***none***, ***prometheus***,
- // ***prometheusAgent*** or ***mimir***.
+ // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+ // `mimir`.
//
// - `none`: will disable the whole monitoring stack.
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
// instance, Alertmanager, a set of alert rules, exporters needed to monitor all
// the components of the cluster, Grafana and a series of dashboards to view the
// collected metrics, and more.
- // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus
- // in Agent mode (no alerting, no queries, no storage), and all the exporters
- // needed to get metrics for the status of the cluster and the workloads. Useful
- // when having a centralized (remote) Prometheus where to ship the metrics and not
- // storing them locally in the cluster.
- // - `mimir`: will install the same as the `prometheus` option, and in addition
- // Grafana Mimir that allows for longer retention of metrics and the usage of
- // Object Storage.
+ // - `prometheusAgent`: will install Prometheus operator, an instance of
+ // Prometheus in Agent mode (no alerting, no queries, no storage), and all the
+ // exporters needed to get metrics for the status of the cluster and the
+ // workloads. Useful when having a centralized (remote) Prometheus where to ship
+ // the metrics and not storing them locally in the cluster.
+ // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+ // that allows for longer retention of metrics and the usage of Object Storage.
+ //
+ // Default is `prometheus`.
Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
// X509Exporter corresponds to the JSON schema field "x509Exporter".
@@ -976,14 +1113,15 @@ type SpecDistributionModulesMonitoring struct {
}
type SpecDistributionModulesMonitoringAlertManager struct {
- // The webhook url to send deadman switch monitoring, for example to use with
- // healthchecks.io
+ // The webhook URL to send dead man's switch monitoring, for example to use with
+ // healthchecks.io.
DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
- // If true, the default rules will be installed
+ // Set to false to avoid installing the Prometheus rules (alerts) included with
+ // the distribution.
InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
- // The slack webhook url to send alerts
+ // The Slack webhook URL where to send the infrastructural and workload alerts to.
SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
}
@@ -1022,17 +1160,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct {
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the Mimir package.
type SpecDistributionModulesMonitoringMimir struct {
- // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
+ // The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+ // Configuration for Mimir's external storage backend.
ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The retention time for the mimir pods
+ // The retention time for the metrics stored in Mimir. Default is `30d`. Value must
+ // match the regular expression `[0-9]+(ns|us|ยตs|ms|s|m|h|d|w|y)` where y = 365
+ // days.
RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
}
@@ -1043,23 +1186,25 @@ const (
SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
)
+// Configuration for Mimir's external storage backend.
type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
- // The access key id of the external mimir backend
+ // The access key ID (username) for the external S3-compatible bucket.
AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
- // The bucket name of the external mimir backend
+ // The bucket name of the external S3-compatible object storage.
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
- // The endpoint of the external mimir backend
+ // The external S3-compatible endpoint for Mimir's storage.
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
- // If true, the external mimir backend will not use tls
+ // If true, will use HTTP as protocol instead of HTTPS.
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
- // The secret access key of the external mimir backend
+ // The secret access key (password) for the external S3-compatible bucket.
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
+// Configuration for Monitoring's MinIO deployment.
type SpecDistributionModulesMonitoringMinio struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -1067,15 +1212,15 @@ type SpecDistributionModulesMonitoringMinio struct {
// RootUser corresponds to the JSON schema field "rootUser".
RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
- // The storage size for the minio pods
+ // The PVC size for each MinIO disk, 6 disks total.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
type SpecDistributionModulesMonitoringMinioRootUser struct {
- // The password for the minio root user
+ // The password for the default MinIO root user.
Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
- // The username for the minio root user
+ // The username for the default MinIO root user.
Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
}
@@ -1092,13 +1237,13 @@ type SpecDistributionModulesMonitoringPrometheus struct {
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
- // The retention size for the k8s Prometheus instance.
+ // The retention size for the `k8s` Prometheus instance.
RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
- // The retention time for the k8s Prometheus instance.
+ // The retention time for the `k8s` Prometheus instance.
RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
- // The storage size for the k8s Prometheus instance.
+ // The storage size for the `k8s` Prometheus instance.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
@@ -1134,9 +1279,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct {
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the Networking module.
type SpecDistributionModulesNetworking struct {
// Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
// TigeraOperator corresponds to the JSON schema field "tigeraOperator".
TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
@@ -1147,6 +1293,7 @@ type SpecDistributionModulesNetworkingTigeraOperator struct {
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the Policy module.
type SpecDistributionModulesPolicy struct {
// Gatekeeper corresponds to the JSON schema field "gatekeeper".
Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
@@ -1157,20 +1304,27 @@ type SpecDistributionModulesPolicy struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The type of security to use, either ***none***, ***gatekeeper*** or
- // ***kyverno***
+ // The type of policy enforcement to use, either `none`, `gatekeeper` or
+ // `kyverno`.
+ //
+ // Default is `none`.
Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
}
+// Configuration for the Gatekeeper package.
type SpecDistributionModulesPolicyGatekeeper struct {
// This parameter adds namespaces to Gatekeeper's exemption list, so it will not
// enforce the constraints on them.
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
- // The enforcement action to use for the gatekeeper module
+ // The default enforcement action to use for the included constraints. `deny` will
+ // block the admission when violations to the policies are found, `warn` will show
+ // a message to the user but will admit the violating requests and `dryrun` won't
+ // give any feedback to the user but it will log the violations.
EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
- // If true, the default policies will be installed
+ // Set to `false` to avoid installing the default Gatekeeper policies (constraints
+ // templates and constraints) included with the distribution.
InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
// Overrides corresponds to the JSON schema field "overrides".
@@ -1185,18 +1339,22 @@ const (
SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
)
+// Configuration for the Kyverno package.
type SpecDistributionModulesPolicyKyverno struct {
// This parameter adds namespaces to Kyverno's exemption list, so it will not
- // enforce the constraints on them.
+ // enforce the policies on them.
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
- // If true, the default policies will be installed
+ // Set to `false` to avoid installing the default Kyverno policies included with
+ // the distribution.
InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The validation failure action to use for the kyverno module
+ // The validation failure action to use for the policies, `Enforce` will block
+ // when a request does not comply with the policies and `Audit` will not block but
+ // log when a request does not comply with the policies.
ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
}
@@ -1215,6 +1373,7 @@ const (
SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
)
+// Configuration for the Tracing module.
type SpecDistributionModulesTracing struct {
// Minio corresponds to the JSON schema field "minio".
Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
@@ -1225,10 +1384,14 @@ type SpecDistributionModulesTracing struct {
// Tempo corresponds to the JSON schema field "tempo".
Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
- // The type of tracing to use, either ***none*** or ***tempo***
+ // The type of tracing to use, either `none` or `tempo`. `none` will disable the
+ // Tracing module and `tempo` will install a Grafana Tempo deployment.
+ //
+ // Default is `tempo`.
Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
}
+// Configuration for Tracing's MinIO deployment.
type SpecDistributionModulesTracingMinio struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -1236,29 +1399,32 @@ type SpecDistributionModulesTracingMinio struct {
// RootUser corresponds to the JSON schema field "rootUser".
RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
- // The storage size for the minio pods
+ // The PVC size for each MinIO disk, 6 disks total.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
type SpecDistributionModulesTracingMinioRootUser struct {
- // The password for the minio root user
+ // The password for the default MinIO root user.
Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
- // The username for the minio root user
+ // The username for the default MinIO root user.
Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
}
+// Configuration for the Tempo package.
type SpecDistributionModulesTracingTempo struct {
- // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
+ // The storage backend type for Tempo. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+ // Configuration for Tempo's external storage backend.
ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The retention time for the tempo pods
+ // The retention time for the traces stored in Tempo.
RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
}
@@ -1269,20 +1435,21 @@ const (
SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
)
+// Configuration for Tempo's external storage backend.
type SpecDistributionModulesTracingTempoExternalEndpoint struct {
- // The access key id of the external tempo backend
+ // The access key ID (username) for the external S3-compatible bucket.
AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
- // The bucket name of the external tempo backend
+ // The bucket name of the external S3-compatible object storage.
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
- // The endpoint of the external tempo backend
+ // The external S3-compatible endpoint for Tempo's storage.
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
- // If true, the external tempo backend will not use tls
+ // If true, will use HTTP as protocol instead of HTTPS.
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
- // The secret access key of the external tempo backend
+ // The secret access key (password) for the external S3-compatible bucket.
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
@@ -1294,88 +1461,98 @@ const (
)
type SpecInfrastructure struct {
- // This key defines the VPC that will be created in AWS
+ // Vpc corresponds to the JSON schema field "vpc".
Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`
- // This section defines the creation of VPN bastions
+ // Vpn corresponds to the JSON schema field "vpn".
Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
}
+// Configuration for the VPC that will be created to host the EKS cluster and its
+// related resources. If you already have a VPC that you want to use, leave this
+// section empty and use `.spec.kubernetes.vpcId` instead.
type SpecInfrastructureVpc struct {
// Network corresponds to the JSON schema field "network".
Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"`
}
type SpecInfrastructureVpcNetwork struct {
- // This is the CIDR of the VPC that will be created
+ // The network CIDR for the VPC that will be created
Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"`
// SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs".
SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"`
}
+// Network CIDRs configuration for private and public subnets.
type SpecInfrastructureVpcNetworkSubnetsCidrs struct {
- // These are the CIRDs for the private subnets, where the nodes, the pods, and the
+ // The network CIDRs for the private subnets, where the nodes, the pods, and the
// private load balancers will be created
Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"`
- // These are the CIDRs for the public subnets, where the public load balancers and
+ // The network CIDRs for the public subnets, where the public load balancers and
// the VPN servers will be created
Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"`
}
+// Configuration for the VPN server instances.
type SpecInfrastructureVpn struct {
- // This value defines the prefix that will be used to create the bucket name where
- // the VPN servers will store the states
+ // This value defines the prefix for the bucket name where the VPN servers will
+ // store their state (VPN certificates, users).
BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`
- // The dhParamsBits size used for the creation of the .pem file that will be used
- // in the dh openvpn server.conf file
+ // The `dhParamsBits` size used for the creation of the .pem file that will be
+ // used in the dh openvpn server.conf file.
DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`
- // The size of the disk in GB
+ // The size of the disk in GB for each VPN server. Example: entering `50` will
+ // create disks of 50 GB.
DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`
- // Overrides the default IAM user name for the VPN
+ // Overrides the IAM user name for the VPN. Default is to use the cluster name.
IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
- // The size of the AWS EC2 instance
+ // The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+ // nomenclature. Example: `t3.micro`.
InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
- // The number of instances to create, 0 to skip the creation
+ // The number of VPN server instances to create, `0` to skip the creation.
Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
- // The username of the account to create in the bastion's operating system
+ // The username of the account to create in the bastion's operating system.
OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
- // The port used by the OpenVPN server
+ // The port where each OpenVPN server will listen for connections.
Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
// Ssh corresponds to the JSON schema field "ssh".
Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
- // The VPC ID where the VPN servers will be created, required only if
- // .spec.infrastructure.vpc is omitted
+ // The ID of the VPC where the VPN server instances will be created, required only
+ // if `.spec.infrastructure.vpc` is omitted.
VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
- // The CIDR that will be used to assign IP addresses to the VPN clients when
- // connected
+ // The network CIDR that will be used to assign IP addresses to the VPN clients
+ // when connected.
VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"`
}
type SpecInfrastructureVpnSsh struct {
- // The CIDR enabled in the security group that can access the bastions in SSH
+ // The network CIDR enabled in the security group to access the VPN servers
+ // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`
- // The github user name list that will be used to get the ssh public key that will
- // be added as authorized key to the operatorName user
+ // List of GitHub usernames whose SSH public keys will be fetched and added as
+ // authorized keys of the `operatorName` user.
GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`
- // This value defines the public keys that will be added to the bastion's
- // operating system NOTES: Not yet implemented
+ // **NOT IN USE**, use `githubUsersName` instead. This value defines the public
+ // keys that will be added to the bastion's operating system.
PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
}
+// Defines the Kubernetes components configuration and the values needed for the
+// `kubernetes` phase of furyctl.
type SpecKubernetes struct {
// ApiServer corresponds to the JSON schema field "apiServer".
ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"`
@@ -1383,71 +1560,85 @@ type SpecKubernetes struct {
// AwsAuth corresponds to the JSON schema field "awsAuth".
AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"`
- // Overrides the default IAM role name prefix for the EKS cluster
+ // Overrides the default prefix for the IAM role name of the EKS cluster. If not
+ // set, a name will be generated from the cluster name.
ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`
- // Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
- LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
+ // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days.
+ // Setting the value to zero (`0`) makes retention last forever. Default is `90`
+ // days.
+ LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
// Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`
- // This key contains the ssh public key that can connect to the nodes via SSH
- // using the ec2-user user
+ // The SSH public key that can connect to the nodes via SSH using the `ec2-user`
+ // user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`
+ // Global default AMI type used for EKS worker nodes. This will apply to all node
+ // pools unless overridden by a specific node pool.
+ NodePoolGlobalAmiType SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType" yaml:"nodePoolGlobalAmiType" mapstructure:"nodePoolGlobalAmiType"`
+
// NodePools corresponds to the JSON schema field "nodePools".
NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`
- // Either `launch_configurations`, `launch_templates` or `both`. For new clusters
- // use `launch_templates`, for existing cluster you'll need to migrate from
- // `launch_configurations` to `launch_templates` using `both` as interim.
+ // Accepted values are `launch_configurations`, `launch_templates` or `both`. For
+ // new clusters use `launch_templates`, for adopting an existing cluster you'll
+ // need to migrate from `launch_configurations` to `launch_templates` using `both`
+ // as interim.
NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"`
- // This value defines the CIDR that will be used to assign IP addresses to the
- // services
+ // This value defines the network CIDR that will be used to assign IP addresses to
+ // Kubernetes services.
ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"`
- // This value defines the subnet IDs where the EKS cluster will be created,
- // required only if .spec.infrastructure.vpc is omitted
+ // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+ // IDs of the subnets where the EKS cluster will be created.
SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
- // This value defines the VPC ID where the EKS cluster will be created, required
- // only if .spec.infrastructure.vpc is omitted
+ // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the
+ // ID of the VPC where the EKS cluster and its related resources will be created.
VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
- // Overrides the default IAM role name prefix for the EKS workers
+ // Overrides the default prefix for the IAM role name of the EKS workers. If not
+ // set, a name will be generated from the cluster name.
WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"`
}
type SpecKubernetesAPIServer struct {
- // This value defines if the API server will be accessible only from the private
- // subnets
+ // This value defines if the Kubernetes API server will be accessible from the
+ // private subnets. Default is `true`.
PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`
- // This value defines the CIDRs that will be allowed to access the API server from
- // the private subnets
+ // The network CIDRs from the private subnets that will be allowed to access the
+ // Kubernetes API server.
PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`
- // This value defines if the API server will be accessible from the public subnets
+ // This value defines if the Kubernetes API server will be accessible from the
+ // public subnets. Default is `false`.
PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`
- // This value defines the CIDRs that will be allowed to access the API server from
- // the public subnets
+ // The network CIDRs from the public subnets that will be allowed to access the
+ // Kubernetes API server.
PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
}
+// Optional additional security configuration for EKS IAM via the `aws-auth`
+// configmap.
+//
+// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
type SpecKubernetesAwsAuth struct {
// This optional array defines additional AWS accounts that will be added to the
- // aws-auth configmap
+ // `aws-auth` configmap.
AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"`
// This optional array defines additional IAM roles that will be added to the
- // aws-auth configmap
+ // `aws-auth` configmap.
Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"`
// This optional array defines additional IAM users that will be added to the
- // aws-auth configmap
+ // `aws-auth` configmap.
Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
}
@@ -1473,6 +1664,8 @@ type SpecKubernetesAwsAuthUser struct {
Username string `json:"username" yaml:"username" mapstructure:"username"`
}
+type SpecKubernetesLogRetentionDays int
+
type SpecKubernetesLogsTypesElem string
const (
@@ -1483,6 +1676,8 @@ const (
SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler"
)
+// Array with all the node pool definitions that will join the cluster. Each item
+// is an object.
type SpecKubernetesNodePool struct {
// AdditionalFirewallRules corresponds to the JSON schema field
// "additionalFirewallRules".
@@ -1492,35 +1687,38 @@ type SpecKubernetesNodePool struct {
Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"`
// This optional array defines additional target groups to attach to the instances
- // in the node pool
+ // in the node pool.
AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"`
- // The container runtime to use for the nodes
+ // The container runtime to use in the nodes of the node pool. Default is
+ // `containerd`.
ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"`
// Instance corresponds to the JSON schema field "instance".
Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"`
- // Kubernetes labels that will be added to the nodes
+ // Kubernetes labels that will be added to the nodes.
Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"`
- // The name of the node pool
+ // The name of the node pool.
Name string `json:"name" yaml:"name" mapstructure:"name"`
// Size corresponds to the JSON schema field "size".
Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"`
- // This value defines the subnet IDs where the nodes will be created
+ // Optional list of subnet IDs where to create the nodes.
SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"`
- // AWS tags that will be added to the ASG and EC2 instances
+ // AWS tags that will be added to the ASG and EC2 instances.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
- // Kubernetes taints that will be added to the nodes
+ // Kubernetes taints that will be added to the nodes.
Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"`
- // Type corresponds to the JSON schema field "type".
- Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
+ // The type of Node Pool, can be `self-managed` for using customization like
+ // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from
+ // Amazon via the `ami.type` field. It is recommended to use `self-managed`.
+ Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"`
}
type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct {
@@ -1536,10 +1734,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct {
// Protocol corresponds to the JSON schema field "protocol".
Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
- // Tags corresponds to the JSON schema field "tags".
+ // Additional AWS tags for the Firewall rule.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
- // Type corresponds to the JSON schema field "type".
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -1550,6 +1749,7 @@ const (
SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress"
)
+// Port range for the Firewall Rule.
type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct {
// From corresponds to the JSON schema field "from".
From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"`
@@ -1559,22 +1759,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct {
}
type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct {
- // The name of the FW rule
+ // The name of the Firewall rule.
Name string `json:"name" yaml:"name" mapstructure:"name"`
// Ports corresponds to the JSON schema field "ports".
Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
- // The protocol of the FW rule
+ // The protocol of the Firewall rule.
Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
- // If true, the source will be the security group itself
+ // If `true`, the source will be the security group itself.
Self bool `json:"self" yaml:"self" mapstructure:"self"`
- // The tags of the FW rule
+ // Additional AWS tags for the Firewall rule.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
- // The type of the FW rule can be ingress or egress
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -1586,22 +1787,23 @@ const (
)
type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct {
- // The name of the FW rule
+ // The name for the additional Firewall rule Security Group.
Name string `json:"name" yaml:"name" mapstructure:"name"`
// Ports corresponds to the JSON schema field "ports".
Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"`
- // The protocol of the FW rule
+ // The protocol of the Firewall rule.
Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"`
- // The source security group ID
+ // The source security group ID.
SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"`
- // The tags of the FW rule
+ // Additional AWS tags for the Firewall rule.
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"`
- // The type of the FW rule can be ingress or egress
+ // The type of the Firewall rule, can be `ingress` for incoming traffic or
+ // `egress` for outgoing traffic.
Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -1612,9 +1814,11 @@ const (
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress"
)
+// Optional additional firewall rules that will be attached to the nodes.
type SpecKubernetesNodePoolAdditionalFirewallRules struct {
- // The CIDR blocks for the FW rule. At the moment the first item of the list will
- // be used, others will be ignored.
+ // The CIDR blocks objects definition for the Firewall rule. Even though it is a
+ // list, only one item is currently supported. See
+ // https://github.com/sighupio/fury-eks-installer/issues/46 for more details.
CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"`
// Self corresponds to the JSON schema field "self".
@@ -1625,14 +1829,36 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct {
SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"`
}
+// Configuration for customizing the Amazon Machine Image (AMI) for the machines of
+// the Node Pool.
+//
+// The AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields
+// for using a custom AMI (just with `self-managed` node pool type) or by setting
+// the `ami.type` field to one of the official AMIs based on Amazon Linux.
type SpecKubernetesNodePoolAmi struct {
- // The AMI ID to use for the nodes
- Id string `json:"id" yaml:"id" mapstructure:"id"`
+ // The ID of the AMI to use for the nodes, must be set together with the `owner`
+ // field. `ami.id` and `ami.owner` can be only set when Node Pool type is
+ // `self-managed` and they can't be set at the same time as `ami.type`.
+ Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"`
+
+ // The owner of the AMI to use for the nodes, must be set together with the `id`
+ // field. `ami.id` and `ami.owner` can be only set when Node Pool type is
+ // `self-managed` and they can't be set at the same time as `ami.type`.
+ Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"`
- // The owner of the AMI
- Owner string `json:"owner" yaml:"owner" mapstructure:"owner"`
+ // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type
+ // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at
+ // the same time as `ami.id` and `ami.owner`.
+ Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}
+type SpecKubernetesNodePoolAmiType string
+
+const (
+ SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2"
+ SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023"
+)
+
type SpecKubernetesNodePoolContainerRuntime string
const (
@@ -1640,20 +1866,32 @@ const (
SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker"
)
+type SpecKubernetesNodePoolGlobalAmiType string
+
+const (
+ SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2"
+ SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023"
+)
+
+// Configuration for the instances that will be used in the node pool.
type SpecKubernetesNodePoolInstance struct {
- // MaxPods corresponds to the JSON schema field "maxPods".
+ // Set the maximum pods per node to a custom value. If not set will use EKS
+ // default value that depends on the instance type.
+ //
+ // Ref:
+ // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt
MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"`
- // If true, the nodes will be created as spot instances
+ // If `true`, the nodes will be created as spot instances. Default is `false`.
Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"`
- // The instance type to use for the nodes
+ // The instance type to use for the nodes.
Type string `json:"type" yaml:"type" mapstructure:"type"`
- // The size of the disk in GB
+ // The size of the disk in GB.
VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"`
- // VolumeType corresponds to the JSON schema field "volumeType".
+ // Volume type for the instance disk. Default is `gp2`.
VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"`
}
@@ -1667,10 +1905,10 @@ const (
)
type SpecKubernetesNodePoolSize struct {
- // The maximum number of nodes in the node pool
+ // The maximum number of nodes in the node pool.
Max int `json:"max" yaml:"max" mapstructure:"max"`
- // The minimum number of nodes in the node pool
+ // The minimum number of nodes in the node pool.
Min int `json:"min" yaml:"min" mapstructure:"min"`
}
@@ -1709,6 +1947,10 @@ type SpecPluginsHelmReleases []struct {
// The chart of the release
Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
+ // Disable running `helm diff` validation when installing the plugin, it will
+ // still be done when upgrading.
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"`
+
// The name of the release
Name string `json:"name" yaml:"name" mapstructure:"name"`
@@ -1759,24 +2001,26 @@ type SpecToolsConfigurationTerraform struct {
State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"`
}
+// Configuration for storing the Terraform state of the cluster.
type SpecToolsConfigurationTerraformState struct {
// S3 corresponds to the JSON schema field "s3".
S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"`
}
+// Configuration for the S3 bucket used to store the Terraform state.
type SpecToolsConfigurationTerraformStateS3 struct {
- // This value defines which bucket will be used to store all the states
+ // This value defines which bucket will be used to store all the states.
BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"`
// This value defines which folder will be used to store all the states inside the
- // bucket
+ // bucket.
KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"`
- // This value defines in which region the bucket is located
+ // This value defines in which region the bucket is located.
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"`
// This value defines if the region of the bucket should be validated or not by
- // Terraform, useful when using a bucket in a recently added region
+ // Terraform, useful when using a bucket in a recently added region.
SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"`
}
@@ -1843,10 +2087,10 @@ type TypesEnvRef string
type TypesFileRef string
type TypesFuryModuleComponentOverrides struct {
- // The node selector to use to place the pods for the minio module
+ // Set to override the node selector used to place the pods of the package.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // The tolerations that will be added to the pods for the cert-manager module
+ // Set to override the tolerations that will be added to the pods of the package.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
@@ -1855,11 +2099,11 @@ type TypesFuryModuleComponentOverridesWithIAMRoleName struct {
IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"`
// The node selector to use to place the pods for the load balancer controller
- // module
+ // module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
// The tolerations that will be added to the pods for the cluster autoscaler
- // module
+ // module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
@@ -1871,25 +2115,28 @@ type TypesFuryModuleComponentOverrides_1 struct {
Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
+// Override the common configuration with a particular configuration for the
+// module.
type TypesFuryModuleOverrides struct {
// Ingresses corresponds to the JSON schema field "ingresses".
Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
- // The node selector to use to place the pods for the dr module
+ // Set to override the node selector used to place the pods of the module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // The tolerations that will be added to the pods for the monitoring module
+ // Set to override the tolerations that will be added to the pods of the module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
type TypesFuryModuleOverridesIngress struct {
- // If true, the ingress will not have authentication
+ // If true, the ingress will not have authentication even if
+ // `.spec.modules.auth.provider.type` is SSO or Basic Auth.
DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
- // The host of the ingress
+ // Use this host for the ingress instead of the default one.
Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
- // The ingress class of the ingress
+ // Use this ingress class for the ingress instead of the default one.
IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
}
@@ -1914,18 +2161,18 @@ type TypesKubeResources struct {
}
type TypesKubeResourcesLimits struct {
- // The cpu limit for the opensearch pods
+ // The CPU limit for the Pod. Example: `1000m`.
Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
- // The memory limit for the opensearch pods
+ // The memory limit for the Pod. Example: `1G`.
Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
}
type TypesKubeResourcesRequests struct {
- // The cpu request for the prometheus pods
+ // The CPU request for the Pod, in cores. Example: `500m`.
Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
- // The memory request for the opensearch pods
+ // The memory request for the Pod. Example: `500M`.
Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
}
@@ -2087,115 +2334,179 @@ var enumValues_SpecDistributionModulesPolicyType = []interface{}{
"kyverno",
}
+var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
+}
+
+var enumValues_SpecDistributionModulesTracingType = []interface{}{
+ "none",
+ "tempo",
+}
+
+var enumValues_SpecKubernetesLogRetentionDays = []interface{}{
+ 0,
+ 1,
+ 3,
+ 5,
+ 7,
+ 14,
+ 30,
+ 60,
+ 90,
+ 120,
+ 150,
+ 180,
+ 365,
+ 400,
+ 545,
+ 731,
+ 1096,
+ 1827,
+ 2192,
+ 2557,
+ 2922,
+ 3288,
+ 3653,
+}
+
+var enumValues_SpecKubernetesLogsTypesElem = []interface{}{
+ "api",
+ "audit",
+ "authenticator",
+ "controllerManager",
+ "scheduler",
+}
+
+var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{
+ "ingress",
+ "egress",
+}
+
+var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{
+ "ingress",
+ "egress",
+}
+
+var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{
+ "ingress",
+ "egress",
+}
+
+var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{
+ "alinux2",
+ "alinux2023",
+}
+
+var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{
+ "docker",
+ "containerd",
+}
+
+var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{
+ "alinux2",
+ "alinux2023",
+}
+
+var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{
+ "gp2",
+ "gp3",
+ "io1",
+ "standard",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionCommonProvider: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
}
- type Plain SpecDistributionCommonProvider
+ type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionCommonProvider(plain)
+ *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType {
+ for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v)
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
return nil
}
-var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{
- "ingress",
- "egress",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
+ return fmt.Errorf("field type in SpecDistributionModulesDr: required")
}
- type Plain SpecDistributionModulesAuthProvider
+ type Plain SpecDistributionModulesDr
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProvider(plain)
+ *j = SpecDistributionModulesDr(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["ports"]; !ok || v == nil {
- return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["protocol"]; !ok || v == nil {
- return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil {
- return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ if v, ok := raw["eks"]; !ok || v == nil {
+ return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required")
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId
+ type Plain SpecDistributionModulesDrVelero
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain)
+ *j = SpecDistributionModulesDrVelero(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["from"]; !ok || v == nil {
- return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
+ if v, ok := raw["bucketName"]; !ok || v == nil {
+ return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required")
}
- if v, ok := raw["to"]; !ok || v == nil {
- return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
+ if v, ok := raw["region"]; !ok || v == nil {
+ return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required")
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts
+ type Plain SpecDistributionModulesDrVeleroEks
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain)
+ *j = SpecDistributionModulesDrVeleroEks(plain)
return nil
}
@@ -2213,6 +2524,9 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte)
if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
}
+ if len(plain.CidrBlocks) > 1 {
+ return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1)
+ }
if plain.Self != nil && len(plain.Self) < 1 {
return fmt.Errorf("field %s length: must be >= %d", "self", 1)
}
@@ -2224,229 +2538,213 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte)
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v)
+ }
+ *j = SpecKubernetesNodePoolContainerRuntime(v)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+ if v, ok := raw["email"]; !ok || v == nil {
+ return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
}
- type Plain SpecDistributionModulesAuth
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuth(plain)
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["id"]; !ok || v == nil {
- return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ }
+ if v, ok := raw["ports"]; !ok || v == nil {
+ return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ }
+ if v, ok := raw["protocol"]; !ok || v == nil {
+ return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
+ }
+ if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil {
+ return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
}
- if v, ok := raw["owner"]; !ok || v == nil {
- return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required")
}
- type Plain SpecKubernetesNodePoolAmi
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesNodePoolAmi(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["clusterIssuer"]; !ok || v == nil {
+ return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManager
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressCertManager(plain)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesLogsTypesElem {
+ for _, expected := range enumValues_SpecDistributionModulesTracingType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
}
- *j = SpecKubernetesLogsTypesElem(v)
+ *j = SpecDistributionModulesTracingType(v)
return nil
}
-var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{
- "docker",
- "containerd",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime {
+ for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v)
}
- *j = SpecKubernetesNodePoolContainerRuntime(v)
+ *j = SpecKubernetesNodePoolInstanceVolumeType(v)
return nil
}
-var enumValues_SpecKubernetesLogsTypesElem = []interface{}{
- "api",
- "audit",
- "authenticator",
- "controllerManager",
- "scheduler",
-}
-
-var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{
- "ingress",
- "egress",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesDrType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["create"]; !ok || v == nil {
+ return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required")
}
- *j = SpecDistributionModulesDrType(v)
+ type Plain SpecDistributionModulesIngressDNSPrivate
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressDNSPrivate(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["bucketName"]; !ok || v == nil {
- return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required")
+ if v, ok := raw["create"]; !ok || v == nil {
+ return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required")
}
- if v, ok := raw["region"]; !ok || v == nil {
- return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required")
}
- type Plain SpecDistributionModulesDrVeleroEks
+ type Plain SpecDistributionModulesIngressDNSPublic
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDrVeleroEks(plain)
+ *j = SpecDistributionModulesIngressDNSPublic(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType {
+ for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v)
- }
- *j = SpecKubernetesNodePoolInstanceVolumeType(v)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["groups"]; !ok || v == nil {
- return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required")
- }
- if v, ok := raw["userarn"]; !ok || v == nil {
- return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required")
- }
- type Plain SpecKubernetesAwsAuthUser
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecKubernetesAwsAuthUser(plain)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["groups"]; !ok || v == nil {
- return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required")
- }
- if v, ok := raw["rolearn"]; !ok || v == nil {
- return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required")
- }
- type Plain SpecKubernetesAwsAuthRole
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v)
}
- *j = SpecKubernetesAwsAuthRole(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["privateAccess"]; !ok || v == nil {
- return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required")
- }
- if v, ok := raw["publicAccess"]; !ok || v == nil {
- return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required")
+ var ok bool
+ for _, expected := range enumValues_TypesAwsRegion {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecKubernetesAPIServer
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v)
}
- *j = SpecKubernetesAPIServer(plain)
+ *j = TypesAwsRegion(v)
return nil
}
@@ -2482,26 +2780,6 @@ var enumValues_TypesAwsRegion = []interface{}{
"us-west-2",
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_TypesAwsRegion {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v)
- }
- *j = TypesAwsRegion(v)
- return nil
-}
-
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2521,125 +2799,135 @@ func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["ssh"]; !ok || v == nil {
- return fmt.Errorf("field ssh in SpecInfrastructureVpn: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
}
- if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil {
- return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required")
+ if v, ok := raw["ports"]; !ok || v == nil {
+ return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
}
- type Plain SpecInfrastructureVpn
+ if v, ok := raw["protocol"]; !ok || v == nil {
+ return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ if v, ok := raw["self"]; !ok || v == nil {
+ return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ }
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecInfrastructureVpn(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["allowedFromCidrs"]; !ok || v == nil {
- return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required")
- }
- if v, ok := raw["githubUsersName"]; !ok || v == nil {
- return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required")
- }
- type Plain SpecInfrastructureVpnSsh
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1)
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
}
- *j = SpecInfrastructureVpnSsh(plain)
+ *j = SpecDistributionModulesIngressNginxTLSProvider(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["eks"]; !ok || v == nil {
- return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required")
+ if v, ok := raw["max"]; !ok || v == nil {
+ return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required")
}
- type Plain SpecDistributionModulesDrVelero
+ if v, ok := raw["min"]; !ok || v == nil {
+ return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required")
+ }
+ type Plain SpecKubernetesNodePoolSize
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDrVelero(plain)
+ *j = SpecKubernetesNodePoolSize(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["network"]; !ok || v == nil {
- return fmt.Errorf("field network in SpecInfrastructureVpc: required")
+ if v, ok := raw["ca"]; !ok || v == nil {
+ return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
}
- type Plain SpecInfrastructureVpc
+ if v, ok := raw["cert"]; !ok || v == nil {
+ return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ if v, ok := raw["key"]; !ok || v == nil {
+ return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ type Plain SpecDistributionModulesIngressNginxTLSSecret
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecInfrastructureVpc(plain)
+ *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["cidr"]; !ok || v == nil {
- return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required")
- }
- if v, ok := raw["subnetsCidrs"]; !ok || v == nil {
- return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required")
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
}
- type Plain SpecInfrastructureVpcNetwork
+ type Plain SpecDistributionModulesIngressNginxTLS
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecInfrastructureVpcNetwork(plain)
+ *j = SpecDistributionModulesIngressNginxTLS(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["private"]; !ok || v == nil {
- return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
- }
- if v, ok := raw["public"]; !ok || v == nil {
- return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesDrType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecInfrastructureVpcNetworkSubnetsCidrs
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
}
- *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain)
+ *j = SpecDistributionModulesDrType(v)
return nil
}
@@ -2669,65 +2957,75 @@ func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["modules"]; !ok || v == nil {
- return fmt.Errorf("field modules in SpecDistribution: required")
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistribution
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v)
}
- *j = SpecDistribution(plain)
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["dr"]; !ok || v == nil {
- return fmt.Errorf("field dr in SpecDistributionModules: required")
+ if v, ok := raw["cidrBlocks"]; !ok || v == nil {
+ return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
}
- if v, ok := raw["ingress"]; !ok || v == nil {
- return fmt.Errorf("field ingress in SpecDistributionModules: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
}
- if v, ok := raw["logging"]; !ok || v == nil {
- return fmt.Errorf("field logging in SpecDistributionModules: required")
+ if v, ok := raw["ports"]; !ok || v == nil {
+ return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
}
- if v, ok := raw["policy"]; !ok || v == nil {
- return fmt.Errorf("field policy in SpecDistributionModules: required")
+ if v, ok := raw["protocol"]; !ok || v == nil {
+ return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
}
- type Plain SpecDistributionModules
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ }
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModules(plain)
+ if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
+ }
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
}
- *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
+ *j = SpecDistributionModulesIngressNginxType(v)
return nil
}
@@ -2746,6 +3044,9 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error {
if v, ok := raw["size"]; !ok || v == nil {
return fmt.Errorf("field size in SpecKubernetesNodePool: required")
}
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecKubernetesNodePool: required")
+ }
type Plain SpecKubernetesNodePool
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
@@ -2756,20 +3057,20 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
+ return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
}
- type Plain SpecDistributionModulesTracing
+ type Plain SpecDistributionModulesIngressNginx
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesTracing(plain)
+ *j = SpecDistributionModulesIngressNginx(plain)
return nil
}
@@ -2800,55 +3101,82 @@ func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["baseDomain"]; !ok || v == nil {
+ return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
+ if v, ok := raw["nginx"]; !ok || v == nil {
+ return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
}
- *j = SpecDistributionModulesTracingType(v)
+ type Plain SpecDistributionModulesIngress
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngress(plain)
return nil
}
-var enumValues_SpecDistributionModulesTracingType = []interface{}{
- "none",
- "tempo",
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+ }
+ type Plain SpecDistributionModulesAuth
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuth(plain)
+ return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
+ }
+ type Plain SpecDistributionModulesAuthProvider
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthProvider(plain)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
+ for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
}
- *j = SpecDistributionModulesTracingTempoBackend(v)
+ *j = SpecDistributionModulesAuthProviderType(v)
return nil
}
-var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
- "minio",
- "externalEndpoint",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2861,6 +3189,9 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil {
return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required")
}
+ if v, ok := raw["nodePoolGlobalAmiType"]; !ok || v == nil {
+ return fmt.Errorf("field nodePoolGlobalAmiType in SpecKubernetes: required")
+ }
if v, ok := raw["nodePools"]; !ok || v == nil {
return fmt.Errorf("field nodePools in SpecKubernetes: required")
}
@@ -2877,122 +3208,181 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v)
+ }
+ *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
+ }
+ if v, ok := raw["value"]; !ok || v == nil {
+ return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+ }
+ type Plain SpecPluginsHelmReleasesElemSetElem
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecPluginsHelmReleasesElemSetElem(plain)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["password"]; !ok || v == nil {
+ return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
+ }
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ }
+ type Plain SpecDistributionModulesAuthProviderBasicAuth
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
+ if v, ok := raw["host"]; !ok || v == nil {
+ return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
}
- type Plain SpecDistributionModulesPolicy
+ if v, ok := raw["ingressClass"]; !ok || v == nil {
+ return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+ }
+ type Plain SpecDistributionModulesAuthOverridesIngress
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicy(plain)
+ *j = SpecDistributionModulesAuthOverridesIngress(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
- }
- if v, ok := raw["value"]; !ok || v == nil {
- return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+ if v, ok := raw["connectors"]; !ok || v == nil {
+ return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
}
- type Plain SpecPluginsHelmReleasesElemSetElem
+ type Plain SpecDistributionModulesAuthDex
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecPluginsHelmReleasesElemSetElem(plain)
+ *j = SpecDistributionModulesAuthDex(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["audit"]; !ok || v == nil {
+ return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
+ if v, ok := raw["errors"]; !ok || v == nil {
+ return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
}
- *j = SpecDistributionModulesPolicyType(v)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
+ if v, ok := raw["events"]; !ok || v == nil {
+ return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["infra"]; !ok || v == nil {
+ return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
+ if v, ok := raw["ingressNginx"]; !ok || v == nil {
+ return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
}
- *j = SpecDistributionModulesLoggingType(v)
+ if v, ok := raw["kubernetes"]; !ok || v == nil {
+ return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdCommon"]; !ok || v == nil {
+ return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdEtcd"]; !ok || v == nil {
+ return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ type Plain SpecDistributionModulesLoggingCustomOutputs
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesLoggingCustomOutputs(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
- }
- if v, ok := raw["validationFailureAction"]; !ok || v == nil {
- return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
}
- type Plain SpecDistributionModulesPolicyKyverno
+ type Plain SpecDistributionCustomPatchesSecretGeneratorResource
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicyKyverno(plain)
+ *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["from"]; !ok || v == nil {
+ return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+ if v, ok := raw["to"]; !ok || v == nil {
+ return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required")
}
- *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+ type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain)
return nil
}
@@ -3017,71 +3407,46 @@ func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) Unmarshal
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
+func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["enforcementAction"]; !ok || v == nil {
- return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
+ if v, ok := raw["bucketName"]; !ok || v == nil {
+ return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required")
}
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
+ if v, ok := raw["keyPrefix"]; !ok || v == nil {
+ return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required")
}
- type Plain SpecDistributionModulesPolicyGatekeeper
+ if v, ok := raw["region"]; !ok || v == nil {
+ return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required")
+ }
+ type Plain SpecToolsConfigurationTerraformStateS3
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicyGatekeeper(plain)
+ *j = SpecToolsConfigurationTerraformStateS3(plain)
return nil
}
-var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{
- "gp2",
- "gp3",
- "io1",
- "standard",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v)
- }
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["max"]; !ok || v == nil {
- return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required")
- }
- if v, ok := raw["min"]; !ok || v == nil {
- return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required")
- }
- type Plain SpecKubernetesNodePoolSize
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
}
- *j = SpecKubernetesNodePoolSize(plain)
+ *j = SpecDistributionModulesLoggingLokiBackend(v)
return nil
}
@@ -3104,22 +3469,22 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
+ for _, expected := range enumValues_SpecKubernetesNodePoolAmiType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v)
}
- *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
+ *j = SpecKubernetesNodePoolAmiType(v)
return nil
}
@@ -3142,32 +3507,22 @@ func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["ports"]; !ok || v == nil {
- return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["protocol"]; !ok || v == nil {
- return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["self"]; !ok || v == nil {
- return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required")
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v)
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain)
+ *j = SpecKubernetesNodePoolGlobalAmiType(v)
return nil
}
@@ -3243,20 +3598,20 @@ func (j *Spec) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+ if v, ok := raw["tsdbStartDate"]; !ok || v == nil {
+ return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required")
}
- type Plain SpecDistributionModulesDr
+ type Plain SpecDistributionModulesLoggingLoki
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDr(plain)
+ *j = SpecDistributionModulesLoggingLoki(plain)
return nil
}
@@ -3282,58 +3637,60 @@ func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
- }
- type Plain SpecDistributionCustomPatchesSecretGeneratorResource
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
}
- *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
+ *j = SpecDistributionModulesLoggingOpensearchType(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
+ }
+ type Plain SpecDistributionModulesLoggingOpensearch
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesLoggingOpensearch(plain)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesLogsTypesElem {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesMonitoring
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v)
}
- *j = SpecDistributionModulesMonitoring(plain)
+ *j = SpecKubernetesLogsTypesElem(v)
return nil
}
@@ -3363,83 +3720,80 @@ var enumValues_TypesKubeTolerationOperator = []interface{}{
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["email"]; !ok || v == nil {
- return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesLoggingType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
}
- *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
+ *j = SpecDistributionModulesLoggingType(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error {
+ var v int
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["clusterIssuer"]; !ok || v == nil {
- return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
+ var ok bool
+ for _, expected := range enumValues_SpecKubernetesLogRetentionDays {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesIngressCertManager
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v)
}
- *j = SpecDistributionModulesIngressCertManager(plain)
+ *j = SpecKubernetesLogRetentionDays(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["create"]; !ok || v == nil {
- return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
}
- type Plain SpecDistributionModulesIngressDNSPrivate
+ type Plain SpecDistributionModulesLogging
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressDNSPrivate(plain)
+ *j = SpecDistributionModulesLogging(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["create"]; !ok || v == nil {
- return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesIngressDNSPublic
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
}
- *j = SpecDistributionModulesIngressDNSPublic(plain)
+ *j = SpecDistributionModulesMonitoringMimirBackend(v)
return nil
}
@@ -3491,42 +3845,50 @@ func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) err
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["groups"]; !ok || v == nil {
+ return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
+ if v, ok := raw["userarn"]; !ok || v == nil {
+ return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required")
}
- *j = SpecDistributionModulesAuthProviderType(v)
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required")
+ }
+ type Plain SpecKubernetesAwsAuthUser
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecKubernetesAwsAuthUser(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["groups"]; !ok || v == nil {
+ return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
+ if v, ok := raw["rolearn"]; !ok || v == nil {
+ return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required")
}
- *j = SpecDistributionModulesIngressNginxTLSProvider(v)
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required")
+ }
+ type Plain SpecKubernetesAwsAuthRole
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecKubernetesAwsAuthRole(plain)
return nil
}
@@ -3577,62 +3939,61 @@ func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["connectors"]; !ok || v == nil {
- return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
}
- type Plain SpecDistributionModulesAuthDex
+ type Plain SpecDistributionModulesMonitoring
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthDex(plain)
+ *j = SpecDistributionModulesMonitoring(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["ca"]; !ok || v == nil {
- return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
- }
- if v, ok := raw["cert"]; !ok || v == nil {
- return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["privateAccess"]; !ok || v == nil {
+ return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required")
}
- if v, ok := raw["key"]; !ok || v == nil {
- return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["publicAccess"]; !ok || v == nil {
+ return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required")
}
- type Plain SpecDistributionModulesIngressNginxTLSSecret
+ type Plain SpecKubernetesAPIServer
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
+ *j = SpecKubernetesAPIServer(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesIngressNginxTLS
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
}
- *j = SpecDistributionModulesIngressNginxTLS(plain)
+ *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
return nil
}
@@ -3662,60 +4023,68 @@ func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["ssh"]; !ok || v == nil {
+ return fmt.Errorf("field ssh in SpecInfrastructureVpn: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v)
+ if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil {
+ return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required")
}
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v)
+ type Plain SpecInfrastructureVpn
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecInfrastructureVpn(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["enforcementAction"]; !ok || v == nil {
+ return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
}
- *j = SpecDistributionModulesIngressNginxType(v)
+ type Plain SpecDistributionModulesPolicyGatekeeper
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesPolicyGatekeeper(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
+ if v, ok := raw["allowedFromCidrs"]; !ok || v == nil {
+ return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required")
}
- type Plain SpecDistributionModulesIngressNginx
+ if v, ok := raw["githubUsersName"]; !ok || v == nil {
+ return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required")
+ }
+ type Plain SpecInfrastructureVpnSsh
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginx(plain)
+ if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 {
+ return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1)
+ }
+ *j = SpecInfrastructureVpnSsh(plain)
return nil
}
@@ -3744,23 +4113,20 @@ func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["baseDomain"]; !ok || v == nil {
- return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
- }
- if v, ok := raw["nginx"]; !ok || v == nil {
- return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+ if v, ok := raw["network"]; !ok || v == nil {
+ return fmt.Errorf("field network in SpecInfrastructureVpc: required")
}
- type Plain SpecDistributionModulesIngress
+ type Plain SpecInfrastructureVpc
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngress(plain)
+ *j = SpecInfrastructureVpc(plain)
return nil
}
@@ -3771,41 +4137,22 @@ var enumValues_TypesKubeTolerationEffect = []interface{}{
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["audit"]; !ok || v == nil {
- return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["errors"]; !ok || v == nil {
- return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["events"]; !ok || v == nil {
- return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["infra"]; !ok || v == nil {
- return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["ingressNginx"]; !ok || v == nil {
- return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["kubernetes"]; !ok || v == nil {
- return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdCommon"]; !ok || v == nil {
- return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdEtcd"]; !ok || v == nil {
- return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesLoggingCustomOutputs
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
}
- *j = SpecDistributionModulesLoggingCustomOutputs(plain)
+ *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
return nil
}
@@ -3827,185 +4174,185 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error {
return nil
}
-var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{
- "ingress",
- "egress",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["cidr"]; !ok || v == nil {
+ return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
+ if v, ok := raw["subnetsCidrs"]; !ok || v == nil {
+ return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required")
}
- *j = SpecDistributionModulesLoggingLokiBackend(v)
+ type Plain SpecInfrastructureVpcNetwork
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecInfrastructureVpcNetwork(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
+ if v, ok := raw["validationFailureAction"]; !ok || v == nil {
+ return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
}
- *j = SpecDistributionModulesMonitoringMimirBackend(v)
+ type Plain SpecDistributionModulesPolicyKyverno
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesPolicyKyverno(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["host"]; !ok || v == nil {
- return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
+ if v, ok := raw["private"]; !ok || v == nil {
+ return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
}
- if v, ok := raw["ingressClass"]; !ok || v == nil {
- return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+ if v, ok := raw["public"]; !ok || v == nil {
+ return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required")
}
- type Plain SpecDistributionModulesAuthOverridesIngress
+ type Plain SpecInfrastructureVpcNetworkSubnetsCidrs
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthOverridesIngress(plain)
+ *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error {
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["cidrBlocks"]; !ok || v == nil {
- return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
- }
- if v, ok := raw["ports"]; !ok || v == nil {
- return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
- }
- if v, ok := raw["protocol"]; !ok || v == nil {
- return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required")
+ if v, ok := raw["modules"]; !ok || v == nil {
+ return fmt.Errorf("field modules in SpecDistribution: required")
}
- type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock
+ type Plain SpecDistribution
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 {
- return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1)
- }
- *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain)
+ *j = SpecDistribution(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
+ for _, expected := range enumValues_SpecDistributionModulesPolicyType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
}
- *j = SpecDistributionModulesLoggingOpensearchType(v)
+ *j = SpecDistributionModulesPolicyType(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
+ if v, ok := raw["dr"]; !ok || v == nil {
+ return fmt.Errorf("field dr in SpecDistributionModules: required")
}
- type Plain SpecDistributionModulesLoggingOpensearch
+ if v, ok := raw["ingress"]; !ok || v == nil {
+ return fmt.Errorf("field ingress in SpecDistributionModules: required")
+ }
+ if v, ok := raw["logging"]; !ok || v == nil {
+ return fmt.Errorf("field logging in SpecDistributionModules: required")
+ }
+ if v, ok := raw["policy"]; !ok || v == nil {
+ return fmt.Errorf("field policy in SpecDistributionModules: required")
+ }
+ type Plain SpecDistributionModules
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLoggingOpensearch(plain)
+ *j = SpecDistributionModules(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["bucketName"]; !ok || v == nil {
- return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
}
- if v, ok := raw["keyPrefix"]; !ok || v == nil {
- return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required")
+ type Plain SpecDistributionModulesPolicy
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
}
- if v, ok := raw["region"]; !ok || v == nil {
- return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required")
+ *j = SpecDistributionModulesPolicy(plain)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
}
- type Plain SpecToolsConfigurationTerraformStateS3
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionCommonProvider: required")
+ }
+ type Plain SpecDistributionCommonProvider
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecToolsConfigurationTerraformStateS3(plain)
+ *j = SpecDistributionCommonProvider(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["password"]; !ok || v == nil {
- return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
}
- type Plain SpecDistributionModulesAuthProviderBasicAuth
+ type Plain SpecDistributionModulesTracing
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+ *j = SpecDistributionModulesTracing(plain)
return nil
}
@@ -4030,20 +4377,22 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesLogging
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
}
- *j = SpecDistributionModulesLogging(plain)
+ *j = SpecDistributionModulesTracingTempoBackend(v)
return nil
}
diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go
index 9a4f9ca9e..e8f0ddf11 100644
--- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go
+++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go
@@ -6,8 +6,11 @@ import (
"encoding/json"
"fmt"
"reflect"
+
+ "github.com/sighupio/go-jsonschema/pkg/types"
)
+// KFD modules deployed on top of an existing Kubernetes cluster.
type KfddistributionKfdV1Alpha2 struct {
// ApiVersion corresponds to the JSON schema field "apiVersion".
ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"`
@@ -27,7 +30,8 @@ type KfddistributionKfdV1Alpha2Kind string
const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution"
type Metadata struct {
- // Name corresponds to the JSON schema field "name".
+ // The name of the cluster. It will also be used as a prefix for all the other
+ // resources created.
Name string `json:"name" yaml:"name" mapstructure:"name"`
}
@@ -35,7 +39,9 @@ type Spec struct {
// Distribution corresponds to the JSON schema field "distribution".
Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"`
- // DistributionVersion corresponds to the JSON schema field "distributionVersion".
+ // Defines which KFD version will be installed and, in consequence, the Kubernetes
+ // version used to create the cluster. It supports git tags and branches. Example:
+ // `v1.30.1`.
DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`
// Plugins corresponds to the JSON schema field "plugins".
@@ -49,36 +55,45 @@ type SpecDistribution struct {
// CustomPatches corresponds to the JSON schema field "customPatches".
CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"`
- // The kubeconfig file path
+ // The path to the kubeconfig file.
Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"`
// Modules corresponds to the JSON schema field "modules".
Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"`
}
+// Common configuration for all the distribution modules.
type SpecDistributionCommon struct {
- // The node selector to use to place the pods for all the KFD modules
+ // The node selector to use to place the pods for all the KFD modules. Follows
+ // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
// Provider corresponds to the JSON schema field "provider".
Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"`
// URL of the registry where to pull images from for the Distribution phase.
- // (Default is registry.sighup.io/fury).
+ // (Default is `registry.sighup.io/fury`).
//
// NOTE: If plugins are pulling from the default registry, the registry will be
// replaced for the plugin too.
Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
- // The relative path to the vendor directory, does not need to be changed
+ // The relative path to the vendor directory, does not need to be changed.
RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"`
- // The tolerations that will be added to the pods for all the KFD modules
+ // An array with the tolerations that will be added to the pods for all the KFD
+ // modules. Follows Kubernetes tolerations format. Example:
+ //
+ // ```yaml
+ // - effect: NoSchedule
+ // key: node.kubernetes.io/role
+ // value: infra
+ // ```
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
type SpecDistributionCommonProvider struct {
- // The type of the provider
+ // The provider type. Don't set. FOR INTERNAL USE ONLY.
Type string `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -277,8 +292,11 @@ type SpecDistributionModules struct {
Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"`
}
+// Configuration for the Auth module.
type SpecDistributionModulesAuth struct {
- // The base domain for the auth module
+ // The base domain for the ingresses created by the Auth module (Gangplank,
+ // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+ // use the `external` ingress class.
BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
// Dex corresponds to the JSON schema field "dex".
@@ -294,11 +312,25 @@ type SpecDistributionModulesAuth struct {
Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
}
+// Configuration for the Dex package.
type SpecDistributionModulesAuthDex struct {
- // The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+ // included with the distribution in Dex's configuration. Example:
+ //
+ // ```yaml
+ // additionalStaticClients:
+ // - id: my-custom-client
+ // name: "A custom additional static client"
+ // redirectURIs:
+ // - "https://myapp.tld/redirect"
+ // - "https://alias.tld/oidc-callback"
+ // secret: supersecretpassword
+ // ```
+ // Reference: https://dexidp.io/docs/connectors/local/
AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
- // The connectors for dex
+ // A list with each item defining a Dex connector. Follows Dex connectors
+ // configuration format: https://dexidp.io/docs/connectors/
Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
// Expiry corresponds to the JSON schema field "expiry".
@@ -316,25 +348,29 @@ type SpecDistributionModulesAuthDexExpiry struct {
SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
}
+// Override the common configuration with a particular configuration for the Auth
+// module.
type SpecDistributionModulesAuthOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
+ // Override the definition of the Auth module ingresses.
Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
- // The node selector to use to place the pods for the auth module
+ // Set to override the node selector used to place the pods of the Auth module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // The tolerations that will be added to the pods for the auth module
+ // Set to override the tolerations that will be added to the pods of the Auth
+ // module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
type SpecDistributionModulesAuthOverridesIngress struct {
- // The host of the ingress
+ // Use this host for the ingress instead of the default one.
Host string `json:"host" yaml:"host" mapstructure:"host"`
- // The ingress class of the ingress
+ // Use this ingress class for the ingress instead of the default one.
IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"`
}
+// Override the definition of the Auth module ingresses.
type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress
type SpecDistributionModulesAuthPomerium interface{}
@@ -459,15 +495,23 @@ type SpecDistributionModulesAuthProvider struct {
// BasicAuth corresponds to the JSON schema field "basicAuth".
BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"`
- // The type of the provider, must be ***none***, ***sso*** or ***basicAuth***
+ // The type of the Auth provider, options are:
+ // - `none`: will disable authentication in the infrastructural ingresses.
+ // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO)
+ // and require authentication before accessing them.
+ // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth
+ // (username and password) authentication.
+ //
+ // Default is `none`.
Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"`
}
+// Configuration for the HTTP Basic Auth provider.
type SpecDistributionModulesAuthProviderBasicAuth struct {
- // The password for the basic auth
+ // The password for logging in with the HTTP basic authentication.
Password string `json:"password" yaml:"password" mapstructure:"password"`
- // The username for the basic auth
+ // The username for logging in with the HTTP basic authentication.
Username string `json:"username" yaml:"username" mapstructure:"username"`
}
@@ -479,11 +523,16 @@ const (
SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso"
)
+// Configuration for the Disaster Recovery module.
type SpecDistributionModulesDr struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // The type of the DR, must be ***none*** or ***on-premises***
+ // The type of the Disaster Recovery, must be `none` or `on-premises`. `none`
+ // disables the module and `on-premises` will install Velero and an optional MinIO
+ // deployment.
+ //
+ // Default is `none`.
Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"`
// Velero corresponds to the JSON schema field "velero".
@@ -497,6 +546,7 @@ const (
SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises"
)
+// Configuration for the Velero package.
type SpecDistributionModulesDrVelero struct {
// The storage backend type for Velero. `minio` will use an in-cluster MinIO
// deployment for object storage, `externalEndpoint` can be used to point to an
@@ -511,6 +561,9 @@ type SpecDistributionModulesDrVelero struct {
// Configuration for Velero's backup schedules.
Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+
+ // Configuration for the additional snapshotController component installation.
+ SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"`
}
type SpecDistributionModulesDrVeleroBackend string
@@ -540,12 +593,35 @@ type SpecDistributionModulesDrVeleroExternalEndpoint struct {
// Configuration for Velero's backup schedules.
type SpecDistributionModulesDrVeleroSchedules struct {
- // Configuration for Velero's schedules cron.
- Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"`
+ // Configuration for Velero schedules.
+ Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
// Whether to install or not the default `manifests` and `full` backups schedules.
// Default is `true`.
Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
+// Configuration for Velero schedules.
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+ // Configuration for Velero's full backup schedule.
+ Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
+
+ // Configuration for Velero's manifests backup schedule.
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
+
+// Configuration for Velero's full backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct {
+ // The cron expression for the `full` backup schedule (default `0 1 * * *`).
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // EXPERIMENTAL (if you do more than one backups, the following backups after the
+ // first are not automatically restorable, see
+ // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for
+ // the manual restore solution): SnapshotMoveData specifies whether snapshot data
+ // should be moved. Velero will create a new volume from the snapshot and upload
+ // the content to the storageLocation.
+ SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"`
// The Time To Live (TTL) of the backups created by the backup schedules (default
// `720h0m0s`, 30 days). Notice that changing this value will affect only newly
@@ -553,35 +629,52 @@ type SpecDistributionModulesDrVeleroSchedules struct {
Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
}
-// Configuration for Velero's schedules cron.
-type SpecDistributionModulesDrVeleroSchedulesCron struct {
- // The cron expression for the `full` backup schedule (default `0 1 * * *`).
- Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
-
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct {
// The cron expression for the `manifests` backup schedule (default `*/15 * * *
// *`).
- Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // The Time To Live (TTL) of the backups created by the backup schedules (default
+ // `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+ // created backups, prior backups will keep the old TTL.
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for the additional snapshotController component installation.
+type SpecDistributionModulesDrVeleroSnapshotController struct {
+ // Whether to install or not the snapshotController component in the cluster.
+ // Before enabling this field, check if your CSI driver does not have
+ // snapshotController built-in.
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
}
type SpecDistributionModulesIngress struct {
- // the base domain used for all the KFD ingresses, if in the nginx dual
- // configuration, it should be the same as the
- // .spec.distribution.modules.ingress.dns.private.name zone
+ // The base domain used for all the KFD infrastructural ingresses. If using the
+ // nginx `dual` type, this value should be the same as the domain associated with
+ // the `internal` ingress class.
BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`
- // CertManager corresponds to the JSON schema field "certManager".
+ // Configuration for the cert-manager package. Required even if
+ // `ingress.nginx.type` is `none`, cert-manager is used for managing other
+ // certificates in the cluster besides the TLS termination certificates for the
+ // ingresses.
CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"`
// Forecastle corresponds to the JSON schema field "forecastle".
Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
- // Configurations for the nginx ingress controller module
+ // Configurations for the Ingress nginx controller package.
Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`
// Overrides corresponds to the JSON schema field "overrides".
Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the cert-manager package. Required even if
+// `ingress.nginx.type` is `none`, cert-manager is used for managing other
+// certificates in the cluster besides the TLS termination certificates for the
+// ingresses.
type SpecDistributionModulesIngressCertManager struct {
// ClusterIssuer corresponds to the JSON schema field "clusterIssuer".
ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"`
@@ -590,17 +683,23 @@ type SpecDistributionModulesIngressCertManager struct {
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
+// Configuration for the cert-manager's ACME clusterIssuer used to request
+// certificates from Let's Encrypt.
type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
- // The email of the cluster issuer
+ // The email address to use during the certificate issuing process.
Email string `json:"email" yaml:"email" mapstructure:"email"`
- // The name of the cluster issuer
+ // The name of the clusterIssuer.
Name string `json:"name" yaml:"name" mapstructure:"name"`
- // The custom solvers configurations
+ // The list of challenge solvers to use instead of the default one for the
+ // `http01` challenge. Check [cert manager's
+ // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+ // for examples for this field.
Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
- // The type of the cluster issuer, must be ***http01***
+ // The type of the clusterIssuer. Only `http01` challenge is supported for
+ // KFDDistribution kind. See solvers for arbitrary configurations.
Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
}
@@ -620,14 +719,24 @@ type SpecDistributionModulesIngressNginx struct {
// Tls corresponds to the JSON schema field "tls".
Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
- // The type of the nginx ingress controller, must be ***none***, ***single*** or
- // ***dual***
+ // The type of the Ingress nginx controller, options are:
+ // - `none`: no ingress controller will be installed and no infrastructural
+ // ingresses will be created.
+ // - `single`: a single ingress controller with ingress class `nginx` will be
+ // installed to manage all the ingress resources, infrastructural ingresses will
+ // be created.
+ // - `dual`: two independent ingress controllers will be installed, one for the
+ // `internal` ingress class intended for private ingresses and one for the
+ // `external` ingress class intended for public ingresses. KFD infrastructural
+ // ingresses will use the `internal` ingress class when using the dual type.
+ //
+ // Default is `single`.
Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}
type SpecDistributionModulesIngressNginxTLS struct {
- // The provider of the TLS certificate, must be ***none***, ***certManager*** or
- // ***secret***
+ // The provider of the TLS certificates for the ingresses, one of: `none`,
+ // `certManager`, or `secret`.
Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
// Secret corresponds to the JSON schema field "secret".
@@ -642,15 +751,18 @@ const (
SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
)
+// Kubernetes TLS secret for the ingresses TLS certificate.
type SpecDistributionModulesIngressNginxTLSSecret struct {
- // Ca corresponds to the JSON schema field "ca".
+ // The Certificate Authority certificate file's content. You can use the
+ // `"{file://}"` notation to get the content from a file.
Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
- // The certificate file content or you can use the file notation to get the
- // content from a file
+ // The certificate file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
- // Key corresponds to the JSON schema field "key".
+ // The signing key file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
Key string `json:"key" yaml:"key" mapstructure:"key"`
}
@@ -662,14 +774,17 @@ const (
SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
)
+// Override the common configuration with a particular configuration for the
+// Ingress module.
type SpecDistributionModulesIngressOverrides struct {
// Ingresses corresponds to the JSON schema field "ingresses".
Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
- // The node selector to use to place the pods for the ingress module
+ // Set to override the node selector used to place the pods of the Ingress module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
- // The tolerations that will be added to the pods for the ingress module
+ // Set to override the tolerations that will be added to the pods of the Ingress
+ // module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
@@ -678,6 +793,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
}
+// Configuration for the Logging module.
type SpecDistributionModulesLogging struct {
// Cerebro corresponds to the JSON schema field "cerebro".
Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -700,83 +816,104 @@ type SpecDistributionModulesLogging struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
- // selects the logging stack. Choosing none will disable the centralized logging.
- // Choosing opensearch will deploy and configure the Logging Operator and an
+ // Selects the logging stack. Options are:
+ // - `none`: will disable the centralized logging.
+ // - `opensearch`: will deploy and configure the Logging Operator and an
// OpenSearch cluster (can be single or triple for HA) where the logs will be
- // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
- // for storage. Choosing customOuput the Logging Operator will be deployed and
- // installed but with no local storage, you will have to create the needed Outputs
- // and ClusterOutputs to ship the logs to your desired storage.
+ // stored.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+ // storage.
+ // - `customOutputs`: the Logging Operator will be deployed and installed but
+ // without in-cluster storage, you will have to create the needed Outputs and
+ // ClusterOutputs to ship the logs to your desired storage.
+ //
+ // Default is `opensearch`.
Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
}
+// DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.
type SpecDistributionModulesLoggingCerebro struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
type SpecDistributionModulesLoggingCustomOutputs struct {
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `audit` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Audit string `json:"audit" yaml:"audit" mapstructure:"audit"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `errors` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Errors string `json:"errors" yaml:"errors" mapstructure:"errors"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `events` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Events string `json:"events" yaml:"events" mapstructure:"events"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `infra` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. It must be a string (and not
+ // a YAML object) following the OutputSpec definition. Use the `nullout` output to
+ // discard the flow: `nullout: {}`
Infra string `json:"infra" yaml:"infra" mapstructure:"infra"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `ingressNginx` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `kubernetes` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `systemdCommon` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"`
- // This value defines where the output from Flow will be sent. Will be the `spec`
- // section of the `Output` object. It must be a string (and not a YAML object)
- // following the OutputSpec definition. Use the nullout output to discard the
- // flow.
+ // This value defines where the output from the `systemdEtcd` Flow will be sent.
+ // This will be the `spec` section of the `Output` object. It must be a string
+ // (and not a YAML object) following the OutputSpec definition. Use the `nullout`
+ // output to discard the flow: `nullout: {}`
SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"`
}
+// Configuration for the Loki package.
type SpecDistributionModulesLoggingLoki struct {
- // Backend corresponds to the JSON schema field "backend".
+ // The storage backend type for Loki. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external object storage instead of deploying an in-cluster MinIO.
Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
+ // Configuration for Loki's external storage backend.
ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+ // time series database from BoltDB to TSDB and the schema from v11 to v13 that it
+ // uses to store the logs.
+ //
+ // The value of this field will determine the date when Loki will start writing
+ // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+ // and schema will be kept until they expire for reading purposes.
+ //
+ // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+ // `2024-11-18`.
+ TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
}
type SpecDistributionModulesLoggingLokiBackend string
@@ -786,23 +923,25 @@ const (
SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio"
)
+// Configuration for Loki's external storage backend.
type SpecDistributionModulesLoggingLokiExternalEndpoint struct {
- // The access key id of the loki external endpoint
+ // The access key ID (username) for the external S3-compatible bucket.
AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
- // The bucket name of the loki external endpoint
+ // The bucket name of the external S3-compatible object storage.
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
- // The endpoint of the loki external endpoint
+ // External S3-compatible endpoint for Loki's storage.
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
- // If true, the loki external endpoint will be insecure
+ // If true, will use HTTP as protocol instead of HTTPS.
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
- // The secret access key of the loki external endpoint
+ // The secret access key (password) for the external S3-compatible bucket.
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
+// Configuration for Logging's MinIO deployment.
type SpecDistributionModulesLoggingMinio struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -810,15 +949,15 @@ type SpecDistributionModulesLoggingMinio struct {
// RootUser corresponds to the JSON schema field "rootUser".
RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
- // The PVC size for each minio disk, 6 disks total
+ // The PVC size for each MinIO disk, 6 disks total.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
}
type SpecDistributionModulesLoggingMinioRootUser struct {
- // The password of the minio root user
+ // The password for the default MinIO root user.
Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
- // The username of the minio root user
+ // The username for the default MinIO root user.
Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
}
@@ -829,10 +968,12 @@ type SpecDistributionModulesLoggingOpensearch struct {
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
- // The storage size for the opensearch pods
+ // The storage size for the OpenSearch volumes. Follows Kubernetes resources
+ // storage requests. Default is `150Gi`.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
- // The type of the opensearch, must be ***single*** or ***triple***
+ // The type of OpenSearch deployment. One of: `single` for a single replica or
+ // `triple` for an HA 3-replicas deployment.
Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -843,6 +984,7 @@ const (
SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple"
)
+// Configuration for the Logging Operator.
type SpecDistributionModulesLoggingOperator struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -854,1524 +996,1612 @@ const (
SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs"
SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki"
SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none"
- SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch"
)
-// configuration for the Monitoring module components
-type SpecDistributionModulesMonitoring struct {
- // Alertmanager corresponds to the JSON schema field "alertmanager".
- Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"`
-
- // BlackboxExporter corresponds to the JSON schema field "blackboxExporter".
- BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"`
-
- // Grafana corresponds to the JSON schema field "grafana".
- Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"`
-
- // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics".
- KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"`
-
- // Mimir corresponds to the JSON schema field "mimir".
- Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"`
-
- // Minio corresponds to the JSON schema field "minio".
- Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Prometheus corresponds to the JSON schema field "prometheus".
- Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
-
- // PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
- PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
-
- // The type of the monitoring, must be ***none***, ***prometheus***,
- // ***prometheusAgent*** or ***mimir***.
- //
- // - `none`: will disable the whole monitoring stack.
- // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
- // instace, Alertmanager, a set of alert rules, exporters needed to monitor all
- // the components of the cluster, Grafana and a series of dashboards to view the
- // collected metrics, and more.
- // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus
- // in Agent mode (no alerting, no queries, no storage), and all the exporters
- // needed to get metrics for the status of the cluster and the workloads. Useful
- // when having a centralized (remote) Prometheus where to ship the metrics and not
- // storing them locally in the cluster.
- // - `mimir`: will install the same as the `prometheus` option, and in addition
- // Grafana Mimir that allows for longer retention of metrics and the usage of
- // Object Storage.
- Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
-
- // X509Exporter corresponds to the JSON schema field "x509Exporter".
- X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+ }
+ *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+ return nil
}
-type SpecDistributionModulesMonitoringAlertManager struct {
- // The webhook url to send deadman switch monitoring, for example to use with
- // healthchecks.io
- DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
-
- // If true, the default rules will be installed
- InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
-
- // The slack webhook url to send alerts
- SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
+ "none",
+ "single",
+ "dual",
}
-type SpecDistributionModulesMonitoringBlackboxExporter struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ }
+ *j = SpecDistributionModulesIngressNginxType(v)
+ return nil
}
-type SpecDistributionModulesMonitoringGrafana struct {
- // Setting this to true will deploy an additional `grafana-basic-auth` ingress
- // protected with Grafana's basic auth instead of SSO. It's intended use is as a
- // temporary ingress for when there are problems with the SSO login flow.
- //
- // Notice that by default anonymous access is enabled.
- BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
- // role. Example:
- //
- // ```yaml
- // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
- // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
- // 'Viewer'
- // ```
- //
- // More details in [Grafana's
- // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
- UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["ca"]; !ok || v == nil {
+ return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ if v, ok := raw["cert"]; !ok || v == nil {
+ return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ if v, ok := raw["key"]; !ok || v == nil {
+ return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ type Plain SpecDistributionModulesIngressNginxTLSSecret
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
+ return nil
}
-type SpecDistributionModulesMonitoringKubeStateMetrics struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
+ }
+ *j = SpecDistributionModulesIngressNginxTLSProvider(v)
+ return nil
}
-type SpecDistributionModulesMonitoringMimir struct {
- // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***
- Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
-
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
- ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The retention time for the mimir pods
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
+ "certManager",
+ "secret",
+ "none",
}
-type SpecDistributionModulesMonitoringMimirBackend string
-
-const (
- SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint"
- SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
-)
-
-type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
- // The access key id of the external mimir backend
- AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
-
- // The bucket name of the external mimir backend
- BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
-
- // The endpoint of the external mimir backend
- Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
-
- // If true, the external mimir backend will not use tls
- Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["clusterIssuer"]; !ok || v == nil {
+ return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManager
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressCertManager(plain)
+ return nil
+}
- // The secret access key of the external mimir backend
- SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
+ }
+ type Plain SpecDistributionModulesIngressNginx
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressNginx(plain)
+ return nil
}
-type SpecDistributionModulesMonitoringMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The storage size for the minio pods
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringMinioRootUser struct {
- // The password for the minio root user
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username for the minio root user
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringPrometheus struct {
- // Set this option to ship the collected metrics to a remote Prometheus receiver.
- //
- // `remoteWrite` is an array of objects that allows configuring the
- // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
- // Prometheus. The objects in the array follow [the same schema as in the
- // prometheus
- // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-
- // The retention size for the k8s Prometheus instance.
- RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
-
- // The retention time for the K8s Prometheus instance.
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-
- // The storage size for the k8s Prometheus instance.
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringPrometheusAgent struct {
- // Set this option to ship the collected metrics to a remote Prometheus receiver.
- //
- // `remoteWrite` is an array of objects that allows configuring the
- // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
- // Prometheus. The objects in the array follow [the same schema as in the
- // prometheus
- // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-}
-
-type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
-
-type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
-
-type SpecDistributionModulesMonitoringType string
-
-const (
- SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir"
- SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
- SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus"
- SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
-)
-
-type SpecDistributionModulesMonitoringX509Exporter struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesNetworking struct {
- // Cilium corresponds to the JSON schema field "cilium".
- Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // TigeraOperator corresponds to the JSON schema field "tigeraOperator".
- TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
-
- // The type of networking to use, either ***none***, ***calico*** or ***cilium***
- Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesNetworkingCilium struct {
- // MaskSize corresponds to the JSON schema field "maskSize".
- MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // PodCidr corresponds to the JSON schema field "podCidr".
- PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
-}
-
-type SpecDistributionModulesNetworkingTigeraOperator struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesNetworkingType string
-
-const (
- SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico"
- SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium"
- SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none"
-)
-
-type SpecDistributionModulesPolicy struct {
- // Gatekeeper corresponds to the JSON schema field "gatekeeper".
- Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
-
- // Kyverno corresponds to the JSON schema field "kyverno".
- Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The type of security to use, either ***none***, ***gatekeeper*** or
- // ***kyverno***
- Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesPolicyGatekeeper struct {
- // This parameter adds namespaces to Gatekeeper's exemption list, so it will not
- // enforce the constraints on them.
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-
- // The enforcement action to use for the gatekeeper module
- EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
-
- // If true, the default policies will be installed
- InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
-
-const (
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
-)
-
-type SpecDistributionModulesPolicyKyverno struct {
- // This parameter adds namespaces to Kyverno's exemption list, so it will not
- // enforce the constraints on them.
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-
- // If true, the default policies will be installed
- InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The validation failure action to use for the kyverno module
- ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
-}
-
-type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
-
-const (
- SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
- SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
-)
-
-type SpecDistributionModulesPolicyType string
-
-const (
- SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
- SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno"
- SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
-)
-
-type SpecDistributionModulesTracing struct {
- // Minio corresponds to the JSON schema field "minio".
- Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Tempo corresponds to the JSON schema field "tempo".
- Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
-
- // The type of tracing to use, either ***none*** or ***tempo***
- Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-type SpecDistributionModulesTracingMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The storage size for the minio pods
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesTracingMinioRootUser struct {
- // The password for the minio root user
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username for the minio root user
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
-}
-
-type SpecDistributionModulesTracingTempo struct {
- // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***
- Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
-
- // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint".
- ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The retention time for the tempo pods
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-}
-
-type SpecDistributionModulesTracingTempoBackend string
-
-const (
- SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
- SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
-)
-
-type SpecDistributionModulesTracingTempoExternalEndpoint struct {
- // The access key id of the external tempo backend
- AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
-
- // The bucket name of the external tempo backend
- BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
-
- // The endpoint of the external tempo backend
- Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
-
- // If true, the external tempo backend will not use tls
- Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
-
- // The secret access key of the external tempo backend
- SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
-}
-
-type SpecDistributionModulesTracingType string
-
-const (
- SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none"
- SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
-)
-
-type SpecPlugins struct {
- // Helm corresponds to the JSON schema field "helm".
- Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"`
-
- // Kustomize corresponds to the JSON schema field "kustomize".
- Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"`
-}
-
-type SpecPluginsHelm struct {
- // Releases corresponds to the JSON schema field "releases".
- Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"`
-
- // Repositories corresponds to the JSON schema field "repositories".
- Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"`
-}
-
-type SpecPluginsHelmReleases []struct {
- // The chart of the release
- Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
-
- // The name of the release
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // The namespace of the release
- Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"`
-
- // Set corresponds to the JSON schema field "set".
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"`
-
- // The values of the release
- Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"`
-
- // The version of the release
- Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"`
-}
-
-type SpecPluginsHelmReleasesElemSetElem struct {
- // The name of the set
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // The value of the set
- Value string `json:"value" yaml:"value" mapstructure:"value"`
-}
-
-type SpecPluginsHelmRepositories []struct {
- // The name of the repository
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // The url of the repository
- Url string `json:"url" yaml:"url" mapstructure:"url"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["email"]; !ok || v == nil {
+ return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
+ return nil
}
-type SpecPluginsKustomize []struct {
- // The folder of the kustomize plugin
- Folder string `json:"folder" yaml:"folder" mapstructure:"folder"`
-
- // The name of the kustomize plugin
- Name string `json:"name" yaml:"name" mapstructure:"name"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
+ }
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
+ return nil
}
-type TypesCidr string
-
-type TypesEnvRef string
-
-type TypesFileRef string
-
-type TypesFuryModuleComponentOverrides struct {
- // The node selector to use to place the pods for the minio module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
-
- // The tolerations that will be added to the pods for the cert-manager module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
+ "http01",
}
-type TypesFuryModuleComponentOverrides_1 struct {
- // NodeSelector corresponds to the JSON schema field "nodeSelector".
- NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
-
- // Tolerations corresponds to the JSON schema field "tolerations".
- Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["baseDomain"]; !ok || v == nil {
+ return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
+ }
+ if v, ok := raw["nginx"]; !ok || v == nil {
+ return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+ }
+ type Plain SpecDistributionModulesIngress
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngress(plain)
+ return nil
}
-type TypesFuryModuleOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
-
- // The node selector to use to place the pods for the security module
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+ }
+ type Plain SpecDistributionModulesDr
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesDr(plain)
+ return nil
+}
- // The tolerations that will be added to the pods for the monitoring module
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v)
+ }
+ *j = SpecDistributionModulesDrVeleroBackend(v)
+ return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["dr"]; !ok || v == nil {
- return fmt.Errorf("field dr in SpecDistributionModules: required")
+ if v, ok := raw["audit"]; !ok || v == nil {
+ return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if v, ok := raw["ingress"]; !ok || v == nil {
- return fmt.Errorf("field ingress in SpecDistributionModules: required")
+ if v, ok := raw["errors"]; !ok || v == nil {
+ return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if v, ok := raw["logging"]; !ok || v == nil {
- return fmt.Errorf("field logging in SpecDistributionModules: required")
+ if v, ok := raw["events"]; !ok || v == nil {
+ return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
}
- if v, ok := raw["policy"]; !ok || v == nil {
- return fmt.Errorf("field policy in SpecDistributionModules: required")
+ if v, ok := raw["infra"]; !ok || v == nil {
+ return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
}
- type Plain SpecDistributionModules
+ if v, ok := raw["ingressNginx"]; !ok || v == nil {
+ return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["kubernetes"]; !ok || v == nil {
+ return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdCommon"]; !ok || v == nil {
+ return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdEtcd"]; !ok || v == nil {
+ return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ type Plain SpecDistributionModulesLoggingCustomOutputs
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModules(plain)
+ *j = SpecDistributionModulesLoggingCustomOutputs(plain)
return nil
}
-var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
- "none",
- "prometheus",
- "prometheusAgent",
- "mimir",
+var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
+}
+
+var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
}
- *j = SpecDistributionModulesMonitoringMimirBackend(v)
+ *j = SpecDistributionModulesLoggingLokiBackend(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesDrType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+ }
+ *j = SpecDistributionModulesDrType(v)
+ return nil
+}
+
+var enumValues_SpecDistributionModulesDrType = []interface{}{
+ "none",
+ "on-premises",
+}
+
+// Override the common configuration with a particular configuration for the
+// module.
+type TypesFuryModuleOverrides struct {
+ // Ingresses corresponds to the JSON schema field "ingresses".
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+
+ // Set to override the node selector used to place the pods of the module.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+ // Set to override the tolerations that will be added to the pods of the module.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
+type TypesKubeResourcesLimits struct {
+ // The CPU limit for the Pod. Example: `1000m`.
+ Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+ // The memory limit for the Pod. Example: `1G`.
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResourcesRequests struct {
+ // The CPU request for the Pod, in cores. Example: `500m`.
+ Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+ // The memory request for the Pod. Example: `500M`.
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResources struct {
+ // Limits corresponds to the JSON schema field "limits".
+ Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+
+ // Requests corresponds to the JSON schema field "requests".
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
+}
+
+type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
+ if v, ok := raw["tsdbStartDate"]; !ok || v == nil {
+ return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required")
}
- type Plain SpecDistributionModulesMonitoring
+ type Plain SpecDistributionModulesLoggingLoki
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesMonitoring(plain)
+ *j = SpecDistributionModulesLoggingLoki(plain)
return nil
}
-var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
- "minio",
- "externalEndpoint",
+type TypesFuryModuleOverridesIngress struct {
+ // If true, the ingress will not have authentication even if
+ // `.spec.modules.auth.provider.type` is SSO or Basic Auth.
+ DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
+
+ // Use this host for the ingress instead of the default one.
+ Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+
+ // Use this ingress class for the ingress instead of the default one.
+ IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
}
- type Plain SpecDistributionModulesLogging
+ type Plain SpecDistributionModulesAuth
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLogging(plain)
+ *j = SpecDistributionModulesAuth(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["maskSize"]; !ok || v == nil {
- return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required")
- }
- if v, ok := raw["podCidr"]; !ok || v == nil {
- return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
}
- type Plain SpecDistributionModulesNetworkingCilium
+ type Plain SpecDistributionModulesAuthProvider
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesNetworkingCilium(plain)
+ *j = SpecDistributionModulesAuthProvider(plain)
return nil
}
+var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{
+ "single",
+ "triple",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingType {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
}
- *j = SpecDistributionModulesLoggingType(v)
+ *j = SpecDistributionModulesLoggingOpensearchType(v)
return nil
}
-var enumValues_SpecDistributionModulesLoggingType = []interface{}{
- "none",
- "opensearch",
- "loki",
- "customOutputs",
-}
-
-var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
- "none",
- "calico",
- "cilium",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
+ for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
}
- *j = SpecDistributionModulesNetworkingType(v)
+ *j = SpecDistributionModulesAuthProviderType(v)
return nil
}
+var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
+ "none",
+ "basicAuth",
+ "sso",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
+ if v, ok := raw["password"]; !ok || v == nil {
+ return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
}
- type Plain SpecDistributionModulesLoggingOpensearch
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ }
+ type Plain SpecDistributionModulesAuthProviderBasicAuth
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLoggingOpensearch(plain)
+ *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
+ type Plain SpecDistributionModulesLoggingOpensearch
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
}
- *j = SpecDistributionModulesLoggingOpensearchType(v)
+ *j = SpecDistributionModulesLoggingOpensearch(plain)
return nil
}
-type TypesKubeResources struct {
- // Limits corresponds to the JSON schema field "limits".
- Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
-
- // Requests corresponds to the JSON schema field "requests".
- Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
-}
-
-var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{
- "single",
- "triple",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesNetworking: required")
+ if v, ok := raw["host"]; !ok || v == nil {
+ return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
}
- type Plain SpecDistributionModulesNetworking
+ if v, ok := raw["ingressClass"]; !ok || v == nil {
+ return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+ }
+ type Plain SpecDistributionModulesAuthOverridesIngress
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesNetworking(plain)
+ *j = SpecDistributionModulesAuthOverridesIngress(plain)
return nil
}
-type TypesKubeResourcesRequests struct {
- // The cpu request for the prometheus pods
- Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
-
- // The memory request for the opensearch pods
- Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
-}
-
-var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
- "deny",
- "dryrun",
- "warn",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["connectors"]; !ok || v == nil {
+ return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
+ type Plain SpecDistributionModulesAuthDex
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
}
- *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
+ *j = SpecDistributionModulesAuthDex(plain)
return nil
}
-type TypesKubeResourcesLimits struct {
- // The cpu limit for the loki pods
- Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
-
- // The memory limit for the opensearch pods
- Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+var enumValues_SpecDistributionModulesLoggingType = []interface{}{
+ "none",
+ "opensearch",
+ "loki",
+ "customOutputs",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
}
- *j = SpecDistributionModulesLoggingLokiBackend(v)
+ *j = SpecDistributionModulesLoggingType(v)
return nil
}
-var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{
- "minio",
- "externalEndpoint",
+type TypesFuryModuleComponentOverrides struct {
+ // Set to override the node selector used to place the pods of the package.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+ // Set to override the tolerations that will be added to the pods of the package.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
+const SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch"
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["audit"]; !ok || v == nil {
- return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["errors"]; !ok || v == nil {
- return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["events"]; !ok || v == nil {
- return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["infra"]; !ok || v == nil {
- return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["ingressNginx"]; !ok || v == nil {
- return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["kubernetes"]; !ok || v == nil {
- return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdCommon"]; !ok || v == nil {
- return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdEtcd"]; !ok || v == nil {
- return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
}
- type Plain SpecDistributionModulesLoggingCustomOutputs
+ type Plain SpecDistributionCustomPatchesSecretGeneratorResource
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLoggingCustomOutputs(plain)
+ *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["enforcementAction"]; !ok || v == nil {
- return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
- }
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesPolicyGatekeeper
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
}
- *j = SpecDistributionModulesPolicyGatekeeper(plain)
+ *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
return nil
}
+var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
+ "create",
+ "replace",
+ "merge",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["baseDomain"]; !ok || v == nil {
- return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
- }
- if v, ok := raw["nginx"]; !ok || v == nil {
- return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
}
- type Plain SpecDistributionModulesIngress
+ type Plain SpecDistributionModulesLogging
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngress(plain)
+ *j = SpecDistributionModulesLogging(plain)
return nil
}
-var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
- "Audit",
- "Enforce",
+type SpecDistributionModulesMonitoringAlertManager struct {
+ // The webhook URL to send dead man's switch monitoring, for example to use with
+ // healthchecks.io.
+ DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
+
+ // Set to false to avoid installing the Prometheus rules (alerts) included with
+ // the distribution.
+ InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+ // The Slack webhook URL where to send the infrastructural and workload alerts to.
+ SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+ // Setting this to true will deploy an additional `grafana-basic-auth` ingress
+ // protected with Grafana's basic auth instead of SSO. It's intended use is as a
+ // temporary ingress for when there are problems with the SSO login flow.
+ //
+ // Notice that by default anonymous access is enabled.
+ BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+ // role. Example:
+ //
+ // ```yaml
+ // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+ // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+ // 'Viewer'
+ // ```
+ //
+ // More details in [Grafana's
+ // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+ UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMimirBackend string
+
+var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
}
- *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+ *j = SpecDistributionModulesMonitoringMimirBackend(v)
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required")
- }
- type Plain SpecDistributionModulesIngressNginx
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressNginx(plain)
- return nil
+const (
+ SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
+ SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint"
+)
+
+// Configuration for Mimir's external storage backend.
+type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // The external S3-compatible endpoint for Mimir's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Mimir package.
+type SpecDistributionModulesMonitoringMimir struct {
+ // The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Mimir's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the logs stored in Mimir. Default is `30d`. Value must
+ // match the regular expression `[0-9]+(ns|us|ยตs|ms|s|m|h|d|w|y)` where y = 365
+ // days.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Monitoring's MinIO deployment.
+type SpecDistributionModulesMonitoringMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheus struct {
+ // Set this option to ship the collected metrics to a remote Prometheus receiver.
+ //
+ // `remoteWrite` is an array of objects that allows configuring the
+ // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+ // Prometheus. The objects in the array follow [the same schema as in the
+ // prometheus
+ // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // The retention size for the `k8s` Prometheus instance.
+ RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
+
+ // The retention time for the `k8s` Prometheus instance.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+
+ // The storage size for the `k8s` Prometheus instance.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheusAgent struct {
+ // Set this option to ship the collected metrics to a remote Prometheus receiver.
+ //
+ // `remoteWrite` is an array of objects that allows configuring the
+ // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+ // Prometheus. The objects in the array follow [the same schema as in the
+ // prometheus
+ // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringType string
+
+var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
+ "none",
+ "prometheus",
+ "prometheusAgent",
+ "mimir",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
}
- *j = SpecDistributionModulesIngressNginxType(v)
+ *j = SpecDistributionModulesMonitoringType(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
- "none",
- "single",
- "dual",
-}
+const (
+ SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
+ SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus"
+ SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
+ SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir"
+)
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
- }
- if v, ok := raw["validationFailureAction"]; !ok || v == nil {
- return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
- }
- type Plain SpecDistributionModulesPolicyKyverno
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesPolicyKyverno(plain)
- return nil
+type SpecDistributionModulesMonitoringX509Exporter struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
- }
- type Plain SpecDistributionModulesIngressNginxTLS
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressNginxTLS(plain)
- return nil
-}
+// Configuration for the Monitoring module.
+type SpecDistributionModulesMonitoring struct {
+ // Alertmanager corresponds to the JSON schema field "alertmanager".
+ Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"`
-var enumValues_SpecDistributionModulesPolicyType = []interface{}{
- "none",
- "gatekeeper",
- "kyverno",
+ // BlackboxExporter corresponds to the JSON schema field "blackboxExporter".
+ BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"`
+
+ // Grafana corresponds to the JSON schema field "grafana".
+ Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"`
+
+ // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics".
+ KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"`
+
+ // Mimir corresponds to the JSON schema field "mimir".
+ Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"`
+
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Prometheus corresponds to the JSON schema field "prometheus".
+ Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
+
+ // PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
+ PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
+
+ // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+ // `mimir`.
+ //
+ // - `none`: will disable the whole monitoring stack.
+ // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+ // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+ // the components of the cluster, Grafana and a series of dashboards to view the
+ // collected metrics, and more.
+ // - `prometheusAgent`: will install Prometheus Operator, an instance of
+ // Prometheus in Agent mode (no alerting, no queries, no storage), and all the
+ // exporters needed to get metrics for the status of the cluster and the
+ // workloads. Useful when having a centralized (remote) Prometheus where to ship
+ // the metrics and not storing them locally in the cluster.
+ // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+ // that allows for longer retention of metrics and the usage of Object Storage.
+ //
+ // Default is `prometheus`.
+ Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+ // X509Exporter corresponds to the JSON schema field "x509Exporter".
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
}
- *j = SpecDistributionModulesPolicyType(v)
+ type Plain SpecDistributionModulesMonitoring
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesMonitoring(plain)
return nil
}
+type TypesCidr string
+
+type SpecDistributionModulesNetworkingCilium struct {
+ // The mask size to use for the Pods network on each node.
+ MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Allows specifying a CIDR for the Pods network different from
+ // `.spec.kubernetes.podCidr`. If not set the default is to use
+ // `.spec.kubernetes.podCidr`.
+ PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["ca"]; !ok || v == nil {
- return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
- }
- if v, ok := raw["cert"]; !ok || v == nil {
- return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["maskSize"]; !ok || v == nil {
+ return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required")
}
- if v, ok := raw["key"]; !ok || v == nil {
- return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ if v, ok := raw["podCidr"]; !ok || v == nil {
+ return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required")
}
- type Plain SpecDistributionModulesIngressNginxTLSSecret
+ type Plain SpecDistributionModulesNetworkingCilium
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
+ *j = SpecDistributionModulesNetworkingCilium(plain)
return nil
}
+type SpecDistributionModulesNetworkingTigeraOperator struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingType string
+
+var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
+ "none",
+ "calico",
+ "cilium",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
+ for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
}
- *j = SpecDistributionModulesIngressNginxTLSProvider(v)
+ *j = SpecDistributionModulesNetworkingType(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
- "certManager",
- "secret",
- "none",
-}
+const (
+ SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none"
+ SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico"
+ SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium"
+)
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["clusterIssuer"]; !ok || v == nil {
- return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
- }
- type Plain SpecDistributionModulesIngressCertManager
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressCertManager(plain)
- return nil
+// Configuration for the Networking module.
+type SpecDistributionModulesNetworking struct {
+ // Cilium corresponds to the JSON schema field "cilium".
+ Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // TigeraOperator corresponds to the JSON schema field "tigeraOperator".
+ TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
+
+ // The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or
+ // `cilium`.
+ Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
+ return fmt.Errorf("field type in SpecDistributionModulesNetworking: required")
}
- type Plain SpecDistributionModulesPolicy
+ type Plain SpecDistributionModulesNetworking
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicy(plain)
+ *j = SpecDistributionModulesNetworking(plain)
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["email"]; !ok || v == nil {
- return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
- return nil
+type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
+
+var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
+ "deny",
+ "dryrun",
+ "warn",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
+ for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
}
- *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
+ *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
- "http01",
-}
+const (
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
+)
-var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
- "minio",
- "externalEndpoint",
-}
+// Configuration for the Gatekeeper package.
+type SpecDistributionModulesPolicyGatekeeper struct {
+ // This parameter adds namespaces to Gatekeeper's exemption list, so it will not
+ // enforce the constraints on them.
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
- }
- *j = SpecDistributionModulesTracingTempoBackend(v)
- return nil
+ // The default enforcement action to use for the included constraints. `deny` will
+ // block the admission when violations to the policies are found, `warn` will show
+ // a message to the user but will admit the violating requests and `dryrun` won't
+ // give any feedback to the user but it will log the violations.
+ EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
+
+ // Set to `false` to avoid installing the default Gatekeeper policies (constraints
+ // templates and constraints) included with the distribution.
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+ if v, ok := raw["enforcementAction"]; !ok || v == nil {
+ return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
}
- type Plain SpecDistributionModulesDr
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
+ }
+ type Plain SpecDistributionModulesPolicyGatekeeper
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDr(plain)
+ *j = SpecDistributionModulesPolicyGatekeeper(plain)
return nil
}
+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
+
+var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
+ "Audit",
+ "Enforce",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v)
+ type Plain SpecDistributionModulesIngressNginxTLS
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
}
- *j = SpecDistributionModulesDrVeleroBackend(v)
+ *j = SpecDistributionModulesIngressNginxTLS(plain)
return nil
}
-var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{
- "minio",
- "externalEndpoint",
+const (
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
+)
+
+// Configuration for the Kyverno package.
+type SpecDistributionModulesPolicyKyverno struct {
+ // This parameter adds namespaces to Kyverno's exemption list, so it will not
+ // enforce the policies on them.
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+ // Set to `false` to avoid installing the default Kyverno policies included with
+ // the distribution.
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The validation failure action to use for the policies, `Enforce` will block
+ // when a request does not comply with the policies and `Audit` will not block but
+ // log when a request does not comply with the policies.
+ ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesDrType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+ if v, ok := raw["validationFailureAction"]; !ok || v == nil {
+ return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
}
- *j = SpecDistributionModulesDrType(v)
+ type Plain SpecDistributionModulesPolicyKyverno
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesPolicyKyverno(plain)
return nil
}
-var enumValues_SpecDistributionModulesDrType = []interface{}{
- "none",
- "on-premises",
-}
+type SpecDistributionModulesPolicyType string
-var enumValues_SpecDistributionModulesTracingType = []interface{}{
+var enumValues_SpecDistributionModulesPolicyType = []interface{}{
"none",
- "tempo",
+ "gatekeeper",
+ "kyverno",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingType {
+ for _, expected := range enumValues_SpecDistributionModulesPolicyType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
}
- *j = SpecDistributionModulesTracingType(v)
+ *j = SpecDistributionModulesPolicyType(v)
return nil
}
-type TypesFuryModuleOverridesIngress struct {
- // If true, the ingress will not have authentication
- DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
+const (
+ SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
+ SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
+ SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno"
+)
- // The host of the ingress
- Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+// Configuration for the Policy module.
+type SpecDistributionModulesPolicy struct {
+ // Gatekeeper corresponds to the JSON schema field "gatekeeper".
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
- // The ingress class of the ingress
- IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
-}
+ // Kyverno corresponds to the JSON schema field "kyverno".
+ Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
-type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
- }
- type Plain SpecDistributionModulesAuth
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesAuth(plain)
- return nil
+ // The type of policy enforcement to use, either `none`, `gatekeeper` or
+ // `kyverno`.
+ //
+ // Default is `none`.
+ Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
+ return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
}
- type Plain SpecDistributionModulesTracing
+ type Plain SpecDistributionModulesPolicy
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesTracing(plain)
+ *j = SpecDistributionModulesPolicy(plain)
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
- }
- type Plain SpecDistributionModulesAuthProvider
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesAuthProvider(plain)
- return nil
+type SpecDistributionModulesTracingMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Tracing's MinIO deployment.
+type SpecDistributionModulesTracingMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesTracingTempoBackend string
+
+var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
+ for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
}
- *j = SpecDistributionModulesMonitoringType(v)
+ *j = SpecDistributionModulesTracingTempoBackend(v)
return nil
}
+const (
+ SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
+ SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
+)
+
+// Configuration for Tempo's external storage backend.
+type SpecDistributionModulesTracingTempoExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // The external S3-compatible endpoint for Tempo's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Tempo package.
+type SpecDistributionModulesTracingTempo struct {
+ // The storage backend type for Tempo. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Tempo's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the traces stored in Tempo.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesTracingType string
+
+var enumValues_SpecDistributionModulesTracingType = []interface{}{
+ "none",
+ "tempo",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
+ for _, expected := range enumValues_SpecDistributionModulesTracingType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
}
- *j = SpecDistributionModulesAuthProviderType(v)
+ *j = SpecDistributionModulesTracingType(v)
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["kubeconfig"]; !ok || v == nil {
- return fmt.Errorf("field kubeconfig in SpecDistribution: required")
- }
- if v, ok := raw["modules"]; !ok || v == nil {
- return fmt.Errorf("field modules in SpecDistribution: required")
- }
- type Plain SpecDistribution
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistribution(plain)
- return nil
-}
+const (
+ SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none"
+ SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
+)
-var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
- "none",
- "basicAuth",
- "sso",
+// Configuration for the Tracing module.
+type SpecDistributionModulesTracing struct {
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Tempo corresponds to the JSON schema field "tempo".
+ Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
+
+ // The type of tracing to use, either `none` or `tempo`. `none` will disable the
+ // Tracing module and `tempo` will install a Grafana Tempo deployment.
+ //
+ // Default is `tempo`.
+ Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
- }
- if v, ok := raw["value"]; !ok || v == nil {
- return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesTracing: required")
}
- type Plain SpecPluginsHelmReleasesElemSetElem
+ type Plain SpecDistributionModulesTracing
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecPluginsHelmReleasesElemSetElem(plain)
+ *j = SpecDistributionModulesTracing(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["password"]; !ok || v == nil {
- return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
}
- type Plain SpecDistributionModulesAuthProviderBasicAuth
+ type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+ *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["host"]; !ok || v == nil {
- return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
+ if v, ok := raw["dr"]; !ok || v == nil {
+ return fmt.Errorf("field dr in SpecDistributionModules: required")
}
- if v, ok := raw["ingressClass"]; !ok || v == nil {
- return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
+ if v, ok := raw["ingress"]; !ok || v == nil {
+ return fmt.Errorf("field ingress in SpecDistributionModules: required")
}
- type Plain SpecDistributionModulesAuthOverridesIngress
+ if v, ok := raw["logging"]; !ok || v == nil {
+ return fmt.Errorf("field logging in SpecDistributionModules: required")
+ }
+ if v, ok := raw["policy"]; !ok || v == nil {
+ return fmt.Errorf("field policy in SpecDistributionModules: required")
+ }
+ type Plain SpecDistributionModules
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthOverridesIngress(plain)
+ *j = SpecDistributionModules(plain)
return nil
}
+type TypesKubeLabels map[string]string
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["connectors"]; !ok || v == nil {
- return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
+ if v, ok := raw["kubeconfig"]; !ok || v == nil {
+ return fmt.Errorf("field kubeconfig in SpecDistribution: required")
+ }
+ if v, ok := raw["modules"]; !ok || v == nil {
+ return fmt.Errorf("field modules in SpecDistribution: required")
}
- type Plain SpecDistributionModulesAuthDex
+ type Plain SpecDistribution
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthDex(plain)
+ *j = SpecDistribution(plain)
return nil
}
+type SpecPluginsHelmReleasesElemSetElem struct {
+ // The name of the set
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // The value of the set
+ Value string `json:"value" yaml:"value" mapstructure:"value"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
+func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
+ return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required")
}
- type Plain SpecDistributionCustomPatchesSecretGeneratorResource
+ if v, ok := raw["value"]; !ok || v == nil {
+ return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required")
+ }
+ type Plain SpecPluginsHelmReleasesElemSetElem
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
+ *j = SpecPluginsHelmReleasesElemSetElem(plain)
return nil
}
+type SpecPluginsHelmReleases []struct {
+ // The chart of the release
+ Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
+
+ // Disable running `helm diff` validation when installing the plugin, it will
+ // still be done when upgrading.
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"`
+
+ // The name of the release
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // The namespace of the release
+ Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"`
+
+ // Set corresponds to the JSON schema field "set".
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"`
+
+ // The values of the release
+ Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"`
+
+ // The version of the release
+ Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"`
+}
+
+type SpecPluginsHelmRepositories []struct {
+ // The name of the repository
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // The url of the repository
+ Url string `json:"url" yaml:"url" mapstructure:"url"`
+}
+
+type SpecPluginsHelm struct {
+ // Releases corresponds to the JSON schema field "releases".
+ Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"`
+
+ // Repositories corresponds to the JSON schema field "repositories".
+ Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"`
+}
+
+type SpecPluginsKustomize []struct {
+ // The folder of the kustomize plugin
+ Folder string `json:"folder" yaml:"folder" mapstructure:"folder"`
+
+ // The name of the kustomize plugin
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+}
+
+type SpecPlugins struct {
+ // Helm corresponds to the JSON schema field "helm".
+ Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"`
+
+ // Kustomize corresponds to the JSON schema field "kustomize".
+ Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
+ for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v)
}
- *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
+ *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v)
return nil
}
-var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
- "create",
- "replace",
- "merge",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
func (j *Spec) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2396,46 +2626,6 @@ func (j *Spec) UnmarshalJSON(b []byte) error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
- }
- type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
- return nil
-}
-
-type TypesKubeLabels map[string]string
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v)
- }
- *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v)
- return nil
-}
-
var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{
"create",
"replace",
@@ -2502,15 +2692,39 @@ func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error {
return nil
}
-const TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute"
-
-type TypesKubeTolerationOperator string
-
var enumValues_TypesKubeTolerationOperator = []interface{}{
"Exists",
"Equal",
}
+type TypesKubeTolerationOperator string
+
+const (
+ TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute"
+ TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule"
+ TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule"
+)
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_TypesKubeTolerationEffect {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v)
+ }
+ *j = TypesKubeTolerationEffect(v)
+ return nil
+}
+
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2569,9 +2783,9 @@ func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error {
}
const (
- TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute"
TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule"
TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule"
+ TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute"
)
type TypesKubeTolerationOperator_1 string
@@ -2644,31 +2858,22 @@ func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error {
return nil
}
-const (
- TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule"
- TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule"
-)
+type TypesFuryModuleComponentOverrides_1 struct {
+ // NodeSelector corresponds to the JSON schema field "nodeSelector".
+ NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_TypesKubeTolerationEffect {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v)
- }
- *j = TypesKubeTolerationEffect(v)
- return nil
+ // Tolerations corresponds to the JSON schema field "tolerations".
+ Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
+var enumValues_TypesKubeTolerationEffect = []interface{}{
+ "NoSchedule",
+ "PreferNoSchedule",
+ "NoExecute",
}
+type TypesKubeTolerationEffect string
+
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2687,19 +2892,82 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error {
return nil
}
-var enumValues_TypesKubeTolerationEffect = []interface{}{
+type TypesEnvRef string
+
+type TypesFileRef string
+
+type TypesIpAddress string
+
+type TypesKubeLabels_1 map[string]string
+
+type TypesKubeTaintsEffect string
+
+var enumValues_TypesKubeTaintsEffect = []interface{}{
"NoSchedule",
"PreferNoSchedule",
"NoExecute",
}
-type TypesKubeTolerationEffect string
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_TypesKubeTaintsEffect {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v)
+ }
+ *j = TypesKubeTaintsEffect(v)
+ return nil
+}
-type TypesIpAddress string
+const (
+ TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule"
+ TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule"
+ TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute"
+)
-type TypesKubeLabels_1 map[string]string
+type TypesKubeTaints struct {
+ // Effect corresponds to the JSON schema field "effect".
+ Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"`
-type TypesKubeTaints []string
+ // Key corresponds to the JSON schema field "key".
+ Key string `json:"key" yaml:"key" mapstructure:"key"`
+
+ // Value corresponds to the JSON schema field "value".
+ Value string `json:"value" yaml:"value" mapstructure:"value"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["effect"]; !ok || v == nil {
+ return fmt.Errorf("field effect in TypesKubeTaints: required")
+ }
+ if v, ok := raw["key"]; !ok || v == nil {
+ return fmt.Errorf("field key in TypesKubeTaints: required")
+ }
+ if v, ok := raw["value"]; !ok || v == nil {
+ return fmt.Errorf("field value in TypesKubeTaints: required")
+ }
+ type Plain TypesKubeTaints
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = TypesKubeTaints(plain)
+ return nil
+}
type TypesSemVer string
diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go
index 854a24a16..3d0b8199b 100644
--- a/pkg/apis/onpremises/v1alpha2/public/schema.go
+++ b/pkg/apis/onpremises/v1alpha2/public/schema.go
@@ -6,6 +6,8 @@ import (
"encoding/json"
"fmt"
"reflect"
+
+ "github.com/sighupio/go-jsonschema/pkg/types"
)
type Metadata struct {
@@ -14,6 +16,7 @@ type Metadata struct {
Name string `json:"name" yaml:"name" mapstructure:"name"`
}
+// A KFD Cluster deployed on top of a set of existing VMs.
type OnpremisesKfdV1Alpha2 struct {
// ApiVersion corresponds to the JSON schema field "apiVersion".
ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"`
@@ -38,7 +41,7 @@ type Spec struct {
// Defines which KFD version will be installed and, in consequence, the Kubernetes
// version used to create the cluster. It supports git tags and branches. Example:
- // v1.30.1.
+ // `v1.30.1`.
DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"`
// Kubernetes corresponds to the JSON schema field "kubernetes".
@@ -61,8 +64,12 @@ type SpecDistribution struct {
// Common configuration for all the distribution modules.
type SpecDistributionCommon struct {
+ // EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided
+ // for core modules.
+ NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"`
+
// The node selector to use to place the pods for all the KFD modules. Follows
- // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`
+ // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
// Provider corresponds to the JSON schema field "provider".
@@ -70,6 +77,9 @@ type SpecDistributionCommon struct {
// URL of the registry where to pull images from for the Distribution phase.
// (Default is `registry.sighup.io/fury`).
+ //
+ // NOTE: If plugins are pulling from the default registry, the registry will be
+ // replaced for the plugin too.
Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
// The relative path to the vendor directory, does not need to be changed.
@@ -288,9 +298,9 @@ type SpecDistributionModules struct {
// Configuration for the Auth module.
type SpecDistributionModulesAuth struct {
- // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium,
- // Dex). Notice that when nginx type is dual, these will use the `external`
- // ingress class.
+ // The base domain for the ingresses created by the Auth module (Gangplank,
+ // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+ // use the `external` ingress class.
BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
// Dex corresponds to the JSON schema field "dex".
@@ -541,6 +551,8 @@ type SpecDistributionModulesAuthProvider struct {
// and require authentication before accessing them.
// - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth
// (username and password) authentication.
+ //
+ // Default is `none`.
Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -569,6 +581,8 @@ type SpecDistributionModulesDr struct {
// The type of the Disaster Recovery, must be `none` or `on-premises`. `none`
// disables the module and `on-premises` will install Velero and an optional MinIO
// deployment.
+ //
+ // Default is `none`.
Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"`
// Velero corresponds to the JSON schema field "velero".
@@ -597,6 +611,9 @@ type SpecDistributionModulesDrVelero struct {
// Configuration for Velero's backup schedules.
Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"`
+
+ // Configuration for the additional snapshotController component installation.
+ SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"`
}
type SpecDistributionModulesDrVeleroBackend string
@@ -626,12 +643,35 @@ type SpecDistributionModulesDrVeleroExternalEndpoint struct {
// Configuration for Velero's backup schedules.
type SpecDistributionModulesDrVeleroSchedules struct {
- // Configuration for Velero's schedules cron.
- Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"`
+ // Configuration for Velero schedules.
+ Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"`
// Whether to install or not the default `manifests` and `full` backups schedules.
// Default is `true`.
Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
+}
+
+// Configuration for Velero schedules.
+type SpecDistributionModulesDrVeleroSchedulesDefinitions struct {
+ // Configuration for Velero's manifests backup schedule.
+ Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
+
+ // Configuration for Velero's manifests backup schedule.
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+}
+
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct {
+ // The cron expression for the `full` backup schedule (default `0 1 * * *`).
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // EXPERIMENTAL (if you do more than one backups, the following backups after the
+ // first are not automatically restorable, see
+ // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for
+ // the manual restore solution): SnapshotMoveData specifies whether snapshot data
+ // should be moved. Velero will create a new volume from the snapshot and upload
+ // the content to the storageLocation.
+ SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"`
// The Time To Live (TTL) of the backups created by the backup schedules (default
// `720h0m0s`, 30 days). Notice that changing this value will affect only newly
@@ -639,19 +679,29 @@ type SpecDistributionModulesDrVeleroSchedules struct {
Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
}
-// Configuration for Velero's schedules cron.
-type SpecDistributionModulesDrVeleroSchedulesCron struct {
- // The cron expression for the `full` backup schedule (default `0 1 * * *`).
- Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"`
-
+// Configuration for Velero's manifests backup schedule.
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct {
// The cron expression for the `manifests` backup schedule (default `*/15 * * *
// *`).
- Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"`
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"`
+
+ // The Time To Live (TTL) of the backups created by the backup schedules (default
+ // `720h0m0s`, 30 days). Notice that changing this value will affect only newly
+ // created backups, prior backups will keep the old TTL.
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"`
+}
+
+// Configuration for the additional snapshotController component installation.
+type SpecDistributionModulesDrVeleroSnapshotController struct {
+ // Whether to install or not the snapshotController component in the cluster.
+ // Before enabling this field, check if your CSI driver does not have
+ // snapshotController built-in.
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"`
}
type SpecDistributionModulesIngress struct {
// The base domain used for all the KFD infrastructural ingresses. If using the
- // nginx dual type, this value should be the same as the domain associated with
+ // nginx `dual` type, this value should be the same as the domain associated with
// the `internal` ingress class.
BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"`
@@ -667,7 +717,7 @@ type SpecDistributionModulesIngress struct {
// If corresponds to the JSON schema field "if".
If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"`
- // Configurations for the nginx ingress controller package.
+ // Configurations for the Ingress nginx controller package.
Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"`
// Overrides corresponds to the JSON schema field "overrides".
@@ -695,11 +745,13 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct {
// The email address to use during the certificate issuing process.
Email string `json:"email" yaml:"email" mapstructure:"email"`
- // Name of the clusterIssuer
+ // The name of the clusterIssuer.
Name string `json:"name" yaml:"name" mapstructure:"name"`
- // List of challenge solvers to use instead of the default one for the `http01`
- // challenge.
+ // The list of challenge solvers to use instead of the default one for the
+ // `http01` challenge. Check [cert manager's
+ // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types)
+ // for examples for this field.
Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"`
// The type of the clusterIssuer. Only `http01` challenge is supported for
@@ -723,7 +775,7 @@ type SpecDistributionModulesIngressNginx struct {
// Tls corresponds to the JSON schema field "tls".
Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
- // The type of the nginx ingress controller, options are:
+ // The type of the Ingress nginx controller, options are:
// - `none`: no ingress controller will be installed and no infrastructural
// ingresses will be created.
// - `single`: a single ingress controller with ingress class `nginx` will be
@@ -733,6 +785,8 @@ type SpecDistributionModulesIngressNginx struct {
// `internal` ingress class intended for private ingresses and one for the
// `external` ingress class intended for public ingresses. KFD infrastructural
// ingresses wil use the `internal` ingress class when using the dual type.
+ //
+ // Default is `single`.
Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}
@@ -782,11 +836,11 @@ type SpecDistributionModulesIngressOverrides struct {
// Ingresses corresponds to the JSON schema field "ingresses".
Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
- // Set to override the node selector used to place the pods of the Ingress module
+ // Set to override the node selector used to place the pods of the Ingress module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
// Set to override the tolerations that will be added to the pods of the Ingress
- // module
+ // module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
}
@@ -823,14 +877,17 @@ type SpecDistributionModulesLogging struct {
// - `opensearch`: will deploy and configure the Logging Operator and an
// OpenSearch cluster (can be single or triple for HA) where the logs will be
// stored.
- // - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.
- // - `customOuputs`: the Logging Operator will be deployed and installed but with
- // no local storage, you will have to create the needed Outputs and ClusterOutputs
- // to ship the logs to your desired storage.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+ // storage.
+ // - `customOuputs`: the Logging Operator will be deployed and installed but
+ // without in-cluster storage, you will have to create the needed Outputs and
+ // ClusterOutputs to ship the logs to your desired storage.
+ //
+ // Default is `opensearch`.
Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
}
-// DEPRECATED in latest versions of KFD.
+// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
type SpecDistributionModulesLoggingCerebro struct {
// Overrides corresponds to the JSON schema field "overrides".
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
@@ -901,6 +958,18 @@ type SpecDistributionModulesLoggingLoki struct {
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the
+ // time series database from BoltDB to TSDB and the schema from v11 to v13 that it
+ // uses to store the logs.
+ //
+ // The value of this field will determine the date when Loki will start writing
+ // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB
+ // and schema will be kept until they expire for reading purposes.
+ //
+ // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example:
+ // `2024-11-18`.
+ TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"`
}
type SpecDistributionModulesLoggingLokiBackend string
@@ -955,7 +1024,8 @@ type SpecDistributionModulesLoggingOpensearch struct {
// Resources corresponds to the JSON schema field "resources".
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
- // The storage size for the OpenSearch volumes.
+ // The storage size for the OpenSearch volumes. Follows Kubernetes resources
+ // storage requests. Default is `150Gi`.
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
// The type of OpenSearch deployment. One of: `single` for a single replica or
@@ -1019,16 +1089,18 @@ type SpecDistributionModulesMonitoring struct {
//
// - `none`: will disable the whole monitoring stack.
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
- // instace, Alertmanager, a set of alert rules, exporters needed to monitor all
+ // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
// the components of the cluster, Grafana and a series of dashboards to view the
// collected metrics, and more.
- // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus
- // in Agent mode (no alerting, no queries, no storage), and all the exporters
- // needed to get metrics for the status of the cluster and the workloads. Useful
- // when having a centralized (remote) Prometheus where to ship the metrics and not
- // storing them locally in the cluster.
+ // - `prometheusAgent`: will install Prometheus operator, an instance of
+ // Prometheus in Agent mode (no alerting, no queries, no storage), and all the
+ // exporters needed to get metrics for the status of the cluster and the
+ // workloads. Useful when having a centralized (remote) Prometheus where to ship
+ // the metrics and not storing them locally in the cluster.
// - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
// that allows for longer retention of metrics and the usage of Object Storage.
+ //
+ // Default is `prometheus`.
Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
// X509Exporter corresponds to the JSON schema field "x509Exporter".
@@ -1037,7 +1109,7 @@ type SpecDistributionModulesMonitoring struct {
type SpecDistributionModulesMonitoringAlertManager struct {
// The webhook URL to send dead man's switch monitoring, for example to use with
- // healthchecks.io
+ // healthchecks.io.
DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
// Set to false to avoid installing the Prometheus rules (alerts) included with
@@ -1117,7 +1189,7 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
// The bucket name of the external S3-compatible object storage.
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
- // External S3-compatible endpoint for Mimir's storage.
+ // The external S3-compatible endpoint for Mimir's storage.
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
// If true, will use HTTP as protocol instead of HTTPS.
@@ -1127,1248 +1199,1227 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
}
-// Configuration for Monitoring's MinIO deployment.
-type SpecDistributionModulesMonitoringMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The PVC size for each MinIO disk, 6 disks total.
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesTracingType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
+ }
+ *j = SpecDistributionModulesTracingType(v)
+ return nil
}
-type SpecDistributionModulesMonitoringMinioRootUser struct {
- // The password for the default MinIO root user.
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username for the default MinIO root user.
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["baseDomain"]; !ok || v == nil {
+ return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
+ }
+ if v, ok := raw["nginx"]; !ok || v == nil {
+ return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+ }
+ type Plain SpecDistributionModulesIngress
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngress(plain)
+ return nil
}
-type SpecDistributionModulesMonitoringPrometheus struct {
- // Set this option to ship the collected metrics to a remote Prometheus receiver.
- //
- // `remoteWrite` is an array of objects that allows configuring the
- // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
- // Prometheus. The objects in the array follow [the same schema as in the
- // prometheus
- // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
-
- // The retention size for the `k8s` Prometheus instance.
- RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
-
- // The retention time for the `k8s` Prometheus instance.
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-
- // The storage size for the `k8s` Prometheus instance.
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ }
+ *j = SpecDistributionModulesIngressNginxType(v)
+ return nil
}
-type SpecDistributionModulesMonitoringPrometheusAgent struct {
- // Set this option to ship the collected metrics to a remote Prometheus receiver.
- //
- // `remoteWrite` is an array of objects that allows configuring the
- // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
- // Prometheus. The objects in the array follow [the same schema as in the
- // prometheus
- // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
-
- // Resources corresponds to the JSON schema field "resources".
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
+ "none",
+ "single",
+ "dual",
}
-type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
-
-type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
-
-type SpecDistributionModulesMonitoringType string
-
-const (
- SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir"
- SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
- SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus"
- SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
-)
-
-type SpecDistributionModulesMonitoringX509Exporter struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["audit"]; !ok || v == nil {
+ return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["errors"]; !ok || v == nil {
+ return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["events"]; !ok || v == nil {
+ return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["infra"]; !ok || v == nil {
+ return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["ingressNginx"]; !ok || v == nil {
+ return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["kubernetes"]; !ok || v == nil {
+ return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdCommon"]; !ok || v == nil {
+ return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ if v, ok := raw["systemdEtcd"]; !ok || v == nil {
+ return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ }
+ type Plain SpecDistributionModulesLoggingCustomOutputs
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesLoggingCustomOutputs(plain)
+ return nil
}
-// Configuration for the Networking module.
-type SpecDistributionModulesNetworking struct {
- // Cilium corresponds to the JSON schema field "cilium".
- Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // TigeraOperator corresponds to the JSON schema field "tigeraOperator".
- TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
-
- // The type of CNI plugin to use, either `calico` (default, via the Tigera
- // Operator) or `cilium`.
- Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
+ }
+ type Plain SpecDistributionModulesIngressNginxTLS
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesIngressNginxTLS(plain)
+ return nil
}
-type SpecDistributionModulesNetworkingCilium struct {
- // The mask size to use for the Pods network on each node.
- MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Allows specifing a CIDR for the Pods network different from
- // `.spec.kubernetes.podCidr`. If not set the default is to use
- // `.spec.kubernetes.podCidr`.
- PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"`
+var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
}
-type SpecDistributionModulesNetworkingTigeraOperator struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesNetworkingType string
-
-const (
- SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico"
- SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium"
-)
-
-// Configuration for the Policy module.
-type SpecDistributionModulesPolicy struct {
- // Gatekeeper corresponds to the JSON schema field "gatekeeper".
- Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
-
- // Kyverno corresponds to the JSON schema field "kyverno".
- Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The type of policy enforcement to use, either `none`, `gatekeeper` or
- // `kyverno`.
- Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-// Configuration for the Gatekeeper package.
-type SpecDistributionModulesPolicyGatekeeper struct {
- // This parameter adds namespaces to Gatekeeper's exemption list, so it will not
- // enforce the constraints on them.
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-
- // The default enforcement action to use for the included constraints. `deny` will
- // block the admission when violations to the policies are found, `warn` will show
- // a message to the user but will admit the violating requests and `dryrun` won't
- // give any feedback to the user but it will log the violations.
- EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
-
- // Set to `false` to avoid installing the default Gatekeeper policies (constraints
- // templates and constraints) included with the distribution.
- InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-}
-
-type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
-
-const (
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
- SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
-)
-
-// Configuration for the Kyverno package.
-type SpecDistributionModulesPolicyKyverno struct {
- // This parameter adds namespaces to Kyverno's exemption list, so it will not
- // enforce the policies on them.
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
-
- // Set to `false` to avoid installing the default Kyverno policies included with
- // distribution.
- InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The validation failure action to use for the policies, `Enforce` will block
- // when a request does not comply with the policies and `Audit` will not block but
- // log when a request does not comply with the policies.
- ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
-}
-
-type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
-
-const (
- SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
- SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
-)
-
-type SpecDistributionModulesPolicyType string
-
-const (
- SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
- SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno"
- SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
-)
-
-// Configuration for the Tracing module.
-type SpecDistributionModulesTracing struct {
- // Minio corresponds to the JSON schema field "minio".
- Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // Tempo corresponds to the JSON schema field "tempo".
- Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
-
- // The type of tracing to use, either `none` or `tempo`. `none` will disable the
- // Tracing module and `tempo` will install a Grafana Tempo deployment.
- Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
-}
-
-// Configuration for Tracing's MinIO deployment.
-type SpecDistributionModulesTracingMinio struct {
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // RootUser corresponds to the JSON schema field "rootUser".
- RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
-
- // The PVC size for each MinIO disk, 6 disks total.
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
-}
-
-type SpecDistributionModulesTracingMinioRootUser struct {
- // The password for the default MinIO root user.
- Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
-
- // The username for the default MinIO root user.
- Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
-}
-
-// Configuration for the Tempo package.
-type SpecDistributionModulesTracingTempo struct {
- // The storage backend type for Tempo. `minio` will use an in-cluster MinIO
- // deployment for object storage, `externalEndpoint` can be used to point to an
- // external S3-compatible object storage instead of deploying an in-cluster MinIO.
- Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
-
- // Configuration for Tempo's external storage backend.
- ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
-
- // Overrides corresponds to the JSON schema field "overrides".
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
-
- // The retention time for the traces stored in Tempo.
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
-}
-
-type SpecDistributionModulesTracingTempoBackend string
-
-const (
- SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
- SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
-)
-
-// Configuration for Tempo's external storage backend.
-type SpecDistributionModulesTracingTempoExternalEndpoint struct {
- // The access key ID (username) for the external S3-compatible bucket.
- AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
-
- // The bucket name of the external S3-compatible object storage.
- BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
-
- // External S3-compatible endpoint for Tempo's storage.
- Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
-
- // If true, will use HTTP as protocol instead of HTTPS.
- Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
-
- // The secret access key (password) for the external S3-compatible bucket.
- SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
-}
-
-type SpecDistributionModulesTracingType string
-
-const (
- SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none"
- SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
-)
-
-// Defines the Kubernetes components configuration and the values needed for the
-// kubernetes phase of furyctl.
-type SpecKubernetes struct {
- // Advanced corresponds to the JSON schema field "advanced".
- Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"`
-
- // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible".
- AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"`
-
- // The address for the Kubernetes control plane. Usually a DNS entry pointing to a
- // Load Balancer on port 6443.
- ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"`
-
- // The DNS zone of the machines. It will be appended to the name of each host to
- // generate the `kubernetes_hostname` in the Ansible inventory file. It is also
- // used to calculate etcd's initial cluster value.
- DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"`
-
- // LoadBalancers corresponds to the JSON schema field "loadBalancers".
- LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"`
-
- // Masters corresponds to the JSON schema field "masters".
- Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"`
-
- // Nodes corresponds to the JSON schema field "nodes".
- Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"`
-
- // The path to the folder where the PKI files for Kubernetes and etcd are stored.
- PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"`
-
- // The subnet CIDR to use for the Pods network.
- PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
-
- // Proxy corresponds to the JSON schema field "proxy".
- Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"`
-
- // Ssh corresponds to the JSON schema field "ssh".
- Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
-
- // The subnet CIDR to use for the Services network.
- SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"`
-}
-
-type SpecKubernetesAdvanced struct {
- // AirGap corresponds to the JSON schema field "airGap".
- AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"`
-
- // Cloud corresponds to the JSON schema field "cloud".
- Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"`
-
- // Containerd corresponds to the JSON schema field "containerd".
- Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"`
-
- // Encryption corresponds to the JSON schema field "encryption".
- Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"`
-
- // Oidc corresponds to the JSON schema field "oidc".
- Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"`
-
- // URL of the registry where to pull images from for the Kubernetes phase.
- // (Default is registry.sighup.io/fury/on-premises).
- Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
-
- // Users corresponds to the JSON schema field "users".
- Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
-}
-
-// Advanced configuration for air-gapped installations. Allows setting custom URLs
-// where to download the binaries dependencies from and custom .deb and .rpm
-// package repositories.
-type SpecKubernetesAdvancedAirGap struct {
- // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should
- // be as the one downloaded from containerd GitHub releases page.
- ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"`
-
- // DependenciesOverride corresponds to the JSON schema field
- // "dependenciesOverride".
- DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"`
-
- // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded
- // from
- // `//etcd--linux-.tar.gz`
- EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"`
-
- // Checksum for the runc binary.
- RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"`
-
- // URL where to download the runc binary from.
- RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"`
-}
-
-type SpecKubernetesAdvancedAirGapDependenciesOverride struct {
- // Apt corresponds to the JSON schema field "apt".
- Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"`
-
- // Yum corresponds to the JSON schema field "yum".
- Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"`
-}
-
-type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct {
- // URL where to download the GPG key of the Apt repository. Example:
- // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`
- GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"`
-
- // The GPG key ID of the Apt repository. Example:
- // `36A1D7869245C8950F966E92D8576A8BA88D21E9`
- GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"`
-
- // An indicative name for the Apt repository. Example: `k8s-1.29`
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // A source string for the new Apt repository. Example: `deb
- // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /`
- Repo string `json:"repo" yaml:"repo" mapstructure:"repo"`
-}
-
-type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct {
- // URL where to download the ASCII-armored GPG key of the Yum repository. Example:
- // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`
- GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"`
-
- // If true, the GPG signature check on the packages will be enabled.
- GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"`
-
- // An indicative name for the Yum repository. Example: `k8s-1.29`
- Name string `json:"name" yaml:"name" mapstructure:"name"`
-
- // URL to the directory where the Yum repository's `repodata` directory lives.
- // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/`
- Repo string `json:"repo" yaml:"repo" mapstructure:"repo"`
-
- // If true, the GPG signature check on the `repodata` will be enabled.
- RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"`
-}
-
-type SpecKubernetesAdvancedAnsible struct {
- // Additional configuration to append to the ansible.cfg file
- Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
-
- // The Python interpreter to use for running Ansible. Example: python3
- PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
+ }
+ *j = SpecDistributionModulesLoggingLokiBackend(v)
+ return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
+ if v, ok := raw["ca"]; !ok || v == nil {
+ return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
}
- type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
+ if v, ok := raw["cert"]; !ok || v == nil {
+ return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ if v, ok := raw["key"]; !ok || v == nil {
+ return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ }
+ type Plain SpecDistributionModulesIngressNginxTLSSecret
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
- return nil
-}
-
-var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
- "none",
- "basicAuth",
- "sso",
-}
-
-var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
- "none",
- "prometheus",
- "prometheusAgent",
- "mimir",
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
- }
- *j = SpecDistributionModulesMonitoringType(v)
+ *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
+ for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
}
- *j = SpecDistributionModulesAuthProviderType(v)
+ *j = SpecDistributionModulesIngressNginxTLSProvider(v)
return nil
}
+var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
+ "certManager",
+ "secret",
+ "none",
+}
+
+type TypesKubeResourcesLimits struct {
+ // The CPU limit for the Pod. Example: `1000m`.
+ Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+ // The memory limit for the Pod. Example: `1G`.
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResourcesRequests struct {
+ // The CPU request for the Pod, in cores. Example: `500m`.
+ Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+
+ // The memory request for the Pod. Example: `500M`.
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+}
+
+type TypesKubeResources struct {
+ // Limits corresponds to the JSON schema field "limits".
+ Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+
+ // Requests corresponds to the JSON schema field "requests".
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
+ if v, ok := raw["clusterIssuer"]; !ok || v == nil {
+ return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
}
- type Plain SpecDistributionModulesAuthProvider
+ type Plain SpecDistributionModulesIngressCertManager
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProvider(plain)
+ *j = SpecDistributionModulesIngressCertManager(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["ip"]; !ok || v == nil {
- return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required")
+ if v, ok := raw["tsdbStartDate"]; !ok || v == nil {
+ return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required")
}
- type Plain SpecKubernetesLoadBalancersHost
+ type Plain SpecDistributionModulesLoggingLoki
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecKubernetesLoadBalancersHost(plain)
+ *j = SpecDistributionModulesLoggingLoki(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
+ if v, ok := raw["email"]; !ok || v == nil {
+ return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
}
- type Plain SpecDistributionModulesLogging
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ }
+ type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLogging(plain)
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
return nil
}
-type TypesFuryModuleOverridesIngress struct {
- // If true, the ingress will not have authentication even if
- // `.spec.modules.auth.provider.type` is SSO or Basic Auth.
- DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
-
- // Use this host for the ingress instead of the default one.
- Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
-
- // Use this ingress class for the ingress instead of the default one.
- IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
-}
-
-var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
- "create",
- "replace",
- "merge",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesMonitoring
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
}
- *j = SpecDistributionModulesMonitoring(plain)
+ *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
return nil
}
-type TypesCidr string
-
-type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
-
-// Override the common configuration with a particular configuration for the
-// module.
-type TypesFuryModuleOverrides struct {
- // Ingresses corresponds to the JSON schema field "ingresses".
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
-
- // Set to override the node selector used to place the pods of the module.
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
-
- // Set to override the tolerations that will be added to the pods of the module.
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
-}
-
-var enumValues_SpecDistributionModulesDrType = []interface{}{
- "none",
- "on-premises",
+var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
+ "http01",
}
-var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
- "calico",
- "cilium",
+var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{
+ "single",
+ "triple",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
}
- *j = SpecDistributionModulesNetworkingType(v)
+ *j = SpecDistributionModulesLoggingOpensearchType(v)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesDrType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesDr: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
+ type Plain SpecDistributionModulesDr
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
}
- *j = SpecDistributionModulesDrType(v)
+ *j = SpecDistributionModulesDr(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingType {
+ for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v)
}
- *j = SpecDistributionModulesLoggingType(v)
+ *j = SpecDistributionModulesDrVeleroBackend(v)
return nil
}
-var enumValues_SpecDistributionModulesLoggingType = []interface{}{
- "none",
- "opensearch",
- "loki",
- "customOutputs",
+var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesNetworking: required")
+ return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
}
- type Plain SpecDistributionModulesNetworking
+ type Plain SpecDistributionModulesLoggingOpensearch
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesNetworking(plain)
+ *j = SpecDistributionModulesLoggingOpensearch(plain)
return nil
}
-var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{
- "minio",
- "externalEndpoint",
-}
-
-var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
- "deny",
- "dryrun",
- "warn",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
+ for _, expected := range enumValues_SpecDistributionModulesDrType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v)
}
- *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
+ *j = SpecDistributionModulesDrType(v)
return nil
}
+var enumValues_SpecDistributionModulesDrType = []interface{}{
+ "none",
+ "on-premises",
+}
+
+var enumValues_SpecDistributionModulesLoggingType = []interface{}{
+ "none",
+ "opensearch",
+ "loki",
+ "customOutputs",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend {
+ for _, expected := range enumValues_SpecDistributionModulesLoggingType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v)
}
- *j = SpecDistributionModulesDrVeleroBackend(v)
+ *j = SpecDistributionModulesLoggingType(v)
return nil
}
+// Override the common configuration with a particular configuration for the
+// module.
+type TypesFuryModuleOverrides struct {
+ // Ingresses corresponds to the JSON schema field "ingresses".
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
+
+ // Set to override the node selector used to place the pods of the module.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+ // Set to override the tolerations that will be added to the pods of the module.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
+type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress
+
+type TypesFuryModuleOverridesIngress struct {
+ // If true, the ingress will not have authentication even if
+ // `.spec.modules.auth.provider.type` is SSO or Basic Auth.
+ DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"`
+
+ // Use this host for the ingress instead of the default one.
+ Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"`
+
+ // Use this ingress class for the ingress instead of the default one.
+ IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required")
+ if v, ok := raw["provider"]; !ok || v == nil {
+ return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
}
- type Plain SpecDistributionModulesLoggingOpensearch
+ type Plain SpecDistributionModulesAuth
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLoggingOpensearch(plain)
+ *j = SpecDistributionModulesAuth(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesDr: required")
+ return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required")
}
- type Plain SpecDistributionModulesDr
+ type Plain SpecDistributionModulesAuthProvider
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesDr(plain)
+ *j = SpecDistributionModulesAuthProvider(plain)
return nil
}
-var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{
- "http01",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["enforcementAction"]; !ok || v == nil {
- return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
- }
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesLogging: required")
}
- type Plain SpecDistributionModulesPolicyGatekeeper
+ type Plain SpecDistributionModulesLogging
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicyGatekeeper(plain)
+ *j = SpecDistributionModulesLogging(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType {
+ for _, expected := range enumValues_SpecDistributionModulesAuthProviderType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v)
}
- *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v)
+ *j = SpecDistributionModulesAuthProviderType(v)
return nil
}
-var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
- "Audit",
- "Enforce",
+var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{
+ "none",
+ "basicAuth",
+ "sso",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["password"]; !ok || v == nil {
+ return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
+ if v, ok := raw["username"]; !ok || v == nil {
+ return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
}
- *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
+ type Plain SpecDistributionModulesAuthProviderBasicAuth
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
return nil
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["host"]; !ok || v == nil {
+ return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v)
+ if v, ok := raw["ingressClass"]; !ok || v == nil {
+ return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
}
- *j = SpecDistributionModulesLoggingOpensearchType(v)
+ type Plain SpecDistributionModulesAuthOverridesIngress
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesAuthOverridesIngress(plain)
return nil
}
-var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{
- "single",
- "triple",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["email"]; !ok || v == nil {
- return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required")
+ if v, ok := raw["enabled"]; !ok || v == nil {
+ return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required")
}
- type Plain SpecDistributionModulesIngressCertManagerClusterIssuer
+ type Plain SpecDistributionModulesAuthOIDCKubernetesAuth
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain)
+ *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain)
return nil
}
+var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
+ }
+ *j = SpecDistributionModulesMonitoringMimirBackend(v)
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
- return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
- }
- if v, ok := raw["validationFailureAction"]; !ok || v == nil {
- return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
+ if v, ok := raw["connectors"]; !ok || v == nil {
+ return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
}
- type Plain SpecDistributionModulesPolicyKyverno
+ type Plain SpecDistributionModulesAuthDex
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicyKyverno(plain)
+ *j = SpecDistributionModulesAuthDex(plain)
return nil
}
+type TypesFuryModuleComponentOverrides struct {
+ // Set to override the node selector used to place the pods of the package.
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+
+ // Set to override the tolerations that will be added to the pods of the package.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["clusterIssuer"]; !ok || v == nil {
- return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
}
- type Plain SpecDistributionModulesIngressCertManager
+ type Plain SpecDistributionCustomPatchesSecretGeneratorResource
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressCertManager(plain)
+ *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
return nil
}
-var enumValues_SpecDistributionModulesPolicyType = []interface{}{
- "none",
- "gatekeeper",
- "kyverno",
-}
-
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesPolicyType {
+ for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
}
- *j = SpecDistributionModulesPolicyType(v)
+ *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{
- "certManager",
- "secret",
+type SpecDistributionModulesMonitoringMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Monitoring's MinIO deployment.
+type SpecDistributionModulesMonitoringMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheus struct {
+ // Set this option to ship the collected metrics to a remote Prometheus receiver.
+ //
+ // `remoteWrite` is an array of objects that allows configuring the
+ // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+ // Prometheus. The objects in the array follow [the same schema as in the
+ // prometheus
+ // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+
+ // The retention size for the `k8s` Prometheus instance.
+ RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"`
+
+ // The retention time for the `k8s` Prometheus instance.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+
+ // The storage size for the `k8s` Prometheus instance.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{}
+
+type SpecDistributionModulesMonitoringPrometheusAgent struct {
+ // Set this option to ship the collected metrics to a remote Prometheus receiver.
+ //
+ // `remoteWrite` is an array of objects that allows configuring the
+ // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for
+ // Prometheus. The objects in the array follow [the same schema as in the
+ // prometheus
+ // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"`
+
+ // Resources corresponds to the JSON schema field "resources".
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringType string
+
+var enumValues_SpecDistributionModulesMonitoringType = []interface{}{
"none",
+ "prometheus",
+ "prometheusAgent",
+ "mimir",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider {
+ for _, expected := range enumValues_SpecDistributionModulesMonitoringType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v)
}
- *j = SpecDistributionModulesIngressNginxTLSProvider(v)
+ *j = SpecDistributionModulesMonitoringType(v)
return nil
}
-type TypesKubeResources struct {
- // Limits corresponds to the JSON schema field "limits".
- Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"`
+const (
+ SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none"
+ SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus"
+ SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent"
+ SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir"
+)
- // Requests corresponds to the JSON schema field "requests".
- Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"`
+type SpecDistributionModulesMonitoringX509Exporter struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
-type TypesKubeResourcesRequests struct {
- // The cpu request for the loki pods
- Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
-
- // The memory request for the prometheus pods
- Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{
+ "create",
+ "replace",
+ "merge",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
if v, ok := raw["type"]; !ok || v == nil {
- return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
+ return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required")
}
- type Plain SpecDistributionModulesPolicy
+ type Plain SpecDistributionModulesMonitoring
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesPolicy(plain)
+ *j = SpecDistributionModulesMonitoring(plain)
return nil
}
-type TypesKubeResourcesLimits struct {
- // The cpu limit for the loki pods
- Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"`
+type TypesCidr string
- // The memory limit for the prometheus pods
- Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"`
+type SpecDistributionModulesNetworkingCilium struct {
+ // The mask size to use for the Pods network on each node.
+ MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Allows specifying a CIDR for the Pods network different from
+ // `.spec.kubernetes.podCidr`. If not set the default is to use
+ // `.spec.kubernetes.podCidr`.
+ PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingTigeraOperator struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesNetworkingType string
+
+var enumValues_SpecDistributionModulesNetworkingType = []interface{}{
+ "calico",
+ "cilium",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- if v, ok := raw["ca"]; !ok || v == nil {
- return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required")
- }
- if v, ok := raw["cert"]; !ok || v == nil {
- return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required")
- }
- if v, ok := raw["key"]; !ok || v == nil {
- return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required")
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesNetworkingType {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
}
- type Plain SpecDistributionModulesIngressNginxTLSSecret
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v)
}
- *j = SpecDistributionModulesIngressNginxTLSSecret(plain)
+ *j = SpecDistributionModulesNetworkingType(v)
return nil
}
+const (
+ SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico"
+ SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium"
+)
+
+// Configuration for the Networking module.
+type SpecDistributionModulesNetworking struct {
+ // Cilium corresponds to the JSON schema field "cilium".
+ Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // TigeraOperator corresponds to the JSON schema field "tigeraOperator".
+ TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"`
+
+ // The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`.
+ // Default is `calico`.
+ Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesNetworking: required")
}
- type Plain SpecDistributionModulesIngressNginxTLS
+ type Plain SpecDistributionModulesNetworking
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngressNginxTLS(plain)
+ *j = SpecDistributionModulesNetworking(plain)
return nil
}
-var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
- "minio",
- "externalEndpoint",
+type SpecDistributionModulesPolicyGatekeeperEnforcementAction string
+
+var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{
+ "deny",
+ "dryrun",
+ "warn",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
+ for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v)
}
- *j = SpecDistributionModulesTracingTempoBackend(v)
+ *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v)
return nil
}
-var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{
- "none",
- "single",
- "dual",
+const (
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny"
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun"
+ SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn"
+)
+
+// Configuration for the Gatekeeper package.
+type SpecDistributionModulesPolicyGatekeeper struct {
+ // This parameter adds namespaces to Gatekeeper's exemption list, so it will not
+ // enforce the constraints on them.
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+ // The default enforcement action to use for the included constraints. `deny` will
+ // block the admission when violations to the policies are found, `warn` will show
+ // a message to the user but will admit the violating requests and `dryrun` won't
+ // give any feedback to the user but it will log the violations.
+ EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"`
+
+ // Set to `false` to avoid installing the default Gatekeeper policies (constraints
+ // templates and constraints) included with the distribution.
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
+ if v, ok := raw["enforcementAction"]; !ok || v == nil {
+ return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required")
}
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v)
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required")
}
- *j = SpecDistributionModulesLoggingLokiBackend(v)
+ type Plain SpecDistributionModulesPolicyGatekeeper
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionModulesPolicyGatekeeper(plain)
return nil
}
-var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{
- "minio",
- "externalEndpoint",
+type SpecDistributionModulesPolicyKyvernoValidationFailureAction string
+
+var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{
+ "Audit",
+ "Enforce",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesIngressNginxType {
+ for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v)
}
- *j = SpecDistributionModulesIngressNginxType(v)
+ *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v)
return nil
}
+const (
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit"
+ SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce"
+)
+
+// Configuration for the Kyverno package.
+type SpecDistributionModulesPolicyKyverno struct {
+ // This parameter adds namespaces to Kyverno's exemption list, so it will not
+ // enforce the policies on them.
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+ // Set to `false` to avoid installing the default Kyverno policies included with
+ // the distribution.
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The validation failure action to use for the policies, `Enforce` will block
+ // when a request does not comply with the policies and `Audit` will not block but
+ // log when a request does not comply with the policies.
+ ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["audit"]; !ok || v == nil {
- return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["errors"]; !ok || v == nil {
- return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["events"]; !ok || v == nil {
- return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["infra"]; !ok || v == nil {
- return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["ingressNginx"]; !ok || v == nil {
- return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["kubernetes"]; !ok || v == nil {
- return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required")
- }
- if v, ok := raw["systemdCommon"]; !ok || v == nil {
- return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required")
+ if v, ok := raw["installDefaultPolicies"]; !ok || v == nil {
+ return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required")
}
- if v, ok := raw["systemdEtcd"]; !ok || v == nil {
- return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required")
+ if v, ok := raw["validationFailureAction"]; !ok || v == nil {
+ return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required")
}
- type Plain SpecDistributionModulesLoggingCustomOutputs
+ type Plain SpecDistributionModulesPolicyKyverno
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesLoggingCustomOutputs(plain)
+ *j = SpecDistributionModulesPolicyKyverno(plain)
return nil
}
-var enumValues_SpecDistributionModulesTracingType = []interface{}{
+type SpecDistributionModulesPolicyType string
+
+var enumValues_SpecDistributionModulesPolicyType = []interface{}{
"none",
- "tempo",
+ "gatekeeper",
+ "kyverno",
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error {
var v string
if err := json.Unmarshal(b, &v); err != nil {
return err
}
var ok bool
- for _, expected := range enumValues_SpecDistributionModulesTracingType {
+ for _, expected := range enumValues_SpecDistributionModulesPolicyType {
if reflect.DeepEqual(v, expected) {
ok = true
break
}
}
if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v)
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v)
}
- *j = SpecDistributionModulesTracingType(v)
+ *j = SpecDistributionModulesPolicyType(v)
return nil
}
+const (
+ SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
+ SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper"
+ SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno"
+)
+
+// Configuration for the Policy module.
+type SpecDistributionModulesPolicy struct {
+ // Gatekeeper corresponds to the JSON schema field "gatekeeper".
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"`
+
+ // Kyverno corresponds to the JSON schema field "kyverno".
+ Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The type of policy enforcement to use, either `none`, `gatekeeper` or
+ // `kyverno`.
+ //
+ // Default is `none`.
+ Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error {
+func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["baseDomain"]; !ok || v == nil {
- return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required")
- }
- if v, ok := raw["nginx"]; !ok || v == nil {
- return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required")
+ if v, ok := raw["type"]; !ok || v == nil {
+ return fmt.Errorf("field type in SpecDistributionModulesPolicy: required")
}
- type Plain SpecDistributionModulesIngress
+ type Plain SpecDistributionModulesPolicy
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesIngress(plain)
+ *j = SpecDistributionModulesPolicy(plain)
return nil
}
-var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
+type SpecDistributionModulesTracingMinioRootUser struct {
+ // The password for the default MinIO root user.
+ Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"`
+
+ // The username for the default MinIO root user.
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"`
+}
+
+// Configuration for Tracing's MinIO deployment.
+type SpecDistributionModulesTracingMinio struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // RootUser corresponds to the JSON schema field "rootUser".
+ RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"`
+
+ // The PVC size for each MinIO disk, 6 disks total.
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"`
+}
+
+type SpecDistributionModulesTracingTempoBackend string
+
+var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{
"minio",
"externalEndpoint",
}
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error {
+ var v string
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ var ok bool
+ for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend {
+ if reflect.DeepEqual(v, expected) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v)
+ }
+ *j = SpecDistributionModulesTracingTempoBackend(v)
+ return nil
+}
+
+const (
+ SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio"
+ SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint"
+)
+
+// Configuration for Tempo's external storage backend.
+type SpecDistributionModulesTracingTempoExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // The external S3-compatible endpoint for Tempo's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Tempo package.
+type SpecDistributionModulesTracingTempo struct {
+ // The storage backend type for Tempo. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Tempo's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the traces stored in Tempo.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesTracingType string
+
+var enumValues_SpecDistributionModulesTracingType = []interface{}{
+ "none",
+ "tempo",
+}
+
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2387,6 +2438,29 @@ func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error {
return nil
}
+const (
+ SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none"
+ SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo"
+)
+
+// Configuration for the Tracing module.
+type SpecDistributionModulesTracing struct {
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Tempo corresponds to the JSON schema field "tempo".
+ Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"`
+
+ // The type of tracing to use, either `none` or `tempo`. `none` will disable the
+ // Tracing module and `tempo` will install a Grafana Tempo deployment.
+ //
+ // Default is `tempo`.
+ Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2405,23 +2479,22 @@ func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error {
return nil
}
-type SpecKubernetesLoadBalancersKeepalived struct {
- // Set to install keepalived with a floating virtual IP shared between the load
- // balancer hosts for a deployment in High Availability.
- Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"`
-
- // Name of the network interface where to bind the Keepalived virtual IP.
- Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"`
-
- // The Virtual floating IP for Keepalived
- Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"`
-
- // The passphrase for the Keepalived clustering.
- Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"`
-
- // The virtual router ID of Keepalived, must be different from other Keepalived
- // instances in the same network.
- VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"`
+// UnmarshalJSON implements json.Unmarshaler.
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(b, &raw); err != nil {
+ return err
+ }
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required")
+ }
+ type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource
+ var plain Plain
+ if err := json.Unmarshal(b, &plain); err != nil {
+ return err
+ }
+ *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain)
+ return nil
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -2471,24 +2544,21 @@ func (j *SpecDistribution) UnmarshalJSON(b []byte) error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v)
- }
- *j = SpecDistributionModulesMonitoringMimirBackend(v)
- return nil
+type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct {
+ // URL where to download the GPG key of the Apt repository. Example:
+ // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`
+ GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"`
+
+ // The GPG key ID of the Apt repository. Example:
+ // `36A1D7869245C8950F966E92D8576A8BA88D21E9`
+ GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"`
+
+ // An indicative name for the Apt repository. Example: `k8s-1.29`
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // A source string for the new Apt repository. Example: `deb
+ // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /`
+ Repo string `json:"repo" yaml:"repo" mapstructure:"repo"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -2509,36 +2579,34 @@ func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideApt) UnmarshalJSON(b []
if v, ok := raw["repo"]; !ok || v == nil {
return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required")
}
- type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain)
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["host"]; !ok || v == nil {
- return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required")
- }
- if v, ok := raw["ingressClass"]; !ok || v == nil {
- return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required")
- }
- type Plain SpecDistributionModulesAuthOverridesIngress
+ type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthOverridesIngress(plain)
+ *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain)
return nil
}
+type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct {
+ // URL where to download the ASCII-armored GPG key of the Yum repository. Example:
+ // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key`
+ GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"`
+
+ // If true, the GPG signature check on the packages will be enabled.
+ GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"`
+
+ // An indicative name for the Yum repository. Example: `k8s-1.29`
+ Name string `json:"name" yaml:"name" mapstructure:"name"`
+
+ // URL to the directory where the Yum repository's `repodata` directory lives.
+ // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/`
+ Repo string `json:"repo" yaml:"repo" mapstructure:"repo"`
+
+ // If true, the GPG signature check on the `repodata` will be enabled.
+ RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
@@ -2569,42 +2637,36 @@ func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b []
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error {
- var v string
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- var ok bool
- for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior {
- if reflect.DeepEqual(v, expected) {
- ok = true
- break
- }
- }
- if !ok {
- return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v)
- }
- *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v)
- return nil
+type SpecKubernetesAdvancedAirGapDependenciesOverride struct {
+ // Apt corresponds to the JSON schema field "apt".
+ Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"`
+
+ // Yum corresponds to the JSON schema field "yum".
+ Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["name"]; !ok || v == nil {
- return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required")
- }
- type Plain SpecDistributionCustomPatchesSecretGeneratorResource
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain)
- return nil
+// Advanced configuration for air-gapped installations. Allows setting custom URLs
+// where to download the binaries dependencies from and custom .deb and .rpm
+// package repositories.
+type SpecKubernetesAdvancedAirGap struct {
+ // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should
+ // be as the one downloaded from containerd GitHub releases page.
+ ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"`
+
+ // DependenciesOverride corresponds to the JSON schema field
+ // "dependenciesOverride".
+ DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"`
+
+ // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded
+ // from
+ // `//etcd--linux-.tar.gz`
+ EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"`
+
+ // Checksum for the runc binary.
+ RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"`
+
+ // URL where to download the runc binary from.
+ RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"`
}
type SpecKubernetesAdvancedCloud struct {
@@ -2723,30 +2785,36 @@ type SpecKubernetesAdvancedUsers struct {
Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"`
}
-type TypesFuryModuleComponentOverrides struct {
- // Set to override the node selector used to place the pods of the package.
- NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
+type SpecKubernetesAdvanced struct {
+ // AirGap corresponds to the JSON schema field "airGap".
+ AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"`
- // Set to override the tolerations that will be added to the pods of the package.
- Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
+ // Cloud corresponds to the JSON schema field "cloud".
+ Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"`
+
+ // Containerd corresponds to the JSON schema field "containerd".
+ Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"`
+
+ // Encryption corresponds to the JSON schema field "encryption".
+ Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"`
+
+ // Oidc corresponds to the JSON schema field "oidc".
+ Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"`
+
+ // URL of the registry where to pull images from for the Kubernetes phase.
+ // (Default is registry.sighup.io/fury/on-premises).
+ Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"`
+
+ // Users corresponds to the JSON schema field "users".
+ Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["enabled"]; !ok || v == nil {
- return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required")
- }
- type Plain SpecDistributionModulesAuthOIDCKubernetesAuth
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain)
- return nil
+type SpecKubernetesAdvancedAnsible struct {
+ // Additional configuration to append to the ansible.cfg file
+ Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"`
+
+ // The Python interpreter to use for running Ansible. Example: python3
+ PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"`
}
type SpecKubernetesLoadBalancersHost struct {
@@ -2760,46 +2828,63 @@ type SpecKubernetesLoadBalancersHost struct {
}
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["provider"]; !ok || v == nil {
- return fmt.Errorf("field provider in SpecDistributionModulesAuth: required")
+ if v, ok := raw["ip"]; !ok || v == nil {
+ return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required")
}
- type Plain SpecDistributionModulesAuth
+ if v, ok := raw["name"]; !ok || v == nil {
+ return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required")
+ }
+ type Plain SpecKubernetesLoadBalancersHost
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuth(plain)
+ *j = SpecKubernetesLoadBalancersHost(plain)
return nil
}
+type SpecKubernetesLoadBalancersKeepalived struct {
+ // Set to install keepalived with a floating virtual IP shared between the load
+ // balancer hosts for a deployment in High Availability.
+ Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"`
+
+ // Name of the network interface where to bind the Keepalived virtual IP.
+ Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"`
+
+ // The Virtual floating IP for Keepalived
+ Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"`
+
+ // The passphrase for the Keepalived clustering.
+ Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"`
+
+ // The virtual router ID of Keepalived, must be different from other Keepalived
+ // instances in the same network.
+ VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"`
+}
+
// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error {
+func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error {
var raw map[string]interface{}
if err := json.Unmarshal(b, &raw); err != nil {
return err
}
- if v, ok := raw["password"]; !ok || v == nil {
- return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required")
- }
- if v, ok := raw["username"]; !ok || v == nil {
- return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required")
+ if v, ok := raw["enabled"]; !ok || v == nil {
+ return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required")
}
- type Plain SpecDistributionModulesAuthProviderBasicAuth
+ type Plain SpecKubernetesLoadBalancersKeepalived
var plain Plain
if err := json.Unmarshal(b, &plain); err != nil {
return err
}
- *j = SpecDistributionModulesAuthProviderBasicAuth(plain)
+ *j = SpecKubernetesLoadBalancersKeepalived(plain)
return nil
}
-type TypesFileRef string
-
// Configuration for HAProxy stats page. Accessible at http://:1936/stats
type SpecKubernetesLoadBalancersStats struct {
@@ -3107,22 +3192,47 @@ func (j *SpecKubernetesSSH) UnmarshalJSON(b []byte) error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["connectors"]; !ok || v == nil {
- return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required")
- }
- type Plain SpecDistributionModulesAuthDex
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecDistributionModulesAuthDex(plain)
- return nil
+// Defines the Kubernetes components configuration and the values needed for the
+// kubernetes phase of furyctl.
+type SpecKubernetes struct {
+ // Advanced corresponds to the JSON schema field "advanced".
+ Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"`
+
+ // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible".
+ AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"`
+
+ // The address for the Kubernetes control plane. Usually a DNS entry pointing to a
+ // Load Balancer on port 6443.
+ ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"`
+
+ // The DNS zone of the machines. It will be appended to the name of each host to
+ // generate the `kubernetes_hostname` in the Ansible inventory file. It is also
+ // used to calculate etcd's initial cluster value.
+ DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"`
+
+ // LoadBalancers corresponds to the JSON schema field "loadBalancers".
+ LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"`
+
+ // Masters corresponds to the JSON schema field "masters".
+ Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"`
+
+ // Nodes corresponds to the JSON schema field "nodes".
+ Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"`
+
+ // The path to the folder where the PKI files for Kubernetes and etcd are stored.
+ PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"`
+
+ // The subnet CIDR to use for the Pods network.
+ PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
+
+ // Proxy corresponds to the JSON schema field "proxy".
+ Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"`
+
+ // Ssh corresponds to the JSON schema field "ssh".
+ Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
+
+ // The subnet CIDR to use for the Services network.
+ SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"`
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -3200,6 +3310,10 @@ type SpecPluginsHelmReleases []struct {
// The chart of the release
Chart string `json:"chart" yaml:"chart" mapstructure:"chart"`
+ // Disable running `helm diff` validation when installing the plugin, it will
+ // still be done when upgrading.
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"`
+
// The name of the release
Name string `json:"name" yaml:"name" mapstructure:"name"`
@@ -3560,23 +3674,7 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error {
type TypesEnvRef string
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error {
- var raw map[string]interface{}
- if err := json.Unmarshal(b, &raw); err != nil {
- return err
- }
- if v, ok := raw["enabled"]; !ok || v == nil {
- return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required")
- }
- type Plain SpecKubernetesLoadBalancersKeepalived
- var plain Plain
- if err := json.Unmarshal(b, &plain); err != nil {
- return err
- }
- *j = SpecKubernetesLoadBalancersKeepalived(plain)
- return nil
-}
+type TypesFileRef string
type TypesIpAddress string
diff --git a/rules/onpremises-kfd-v1alpha2.yaml b/rules/onpremises-kfd-v1alpha2.yaml
index 7afc29d61..dedf65668 100644
--- a/rules/onpremises-kfd-v1alpha2.yaml
+++ b/rules/onpremises-kfd-v1alpha2.yaml
@@ -13,6 +13,14 @@ kubernetes:
- path: .spec.kubernetes.svcCidr
immutable: true
distribution:
+ - path: .spec.distribution.common.networkPoliciesEnabled
+ immutable: false
+ description: "changes to the network policies have been detected. This will cause the reconfiguration or deletion of the current network policies."
+ safe:
+ - to: none
+ reducers:
+ - key: distributionCommonNetworkPoliciesEnabled
+ lifecycle: pre-apply
- path: .spec.distribution.modules.networking.type
immutable: true
- path: .spec.distribution.modules.logging.type
diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json
index ea44d253c..0b82f017a 100644
--- a/schemas/private/ekscluster-kfd-v1alpha2.json
+++ b/schemas/private/ekscluster-kfd-v1alpha2.json
@@ -1,4 +1,32 @@
{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "type": "string",
+ "pattern": "^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$"
+ },
+ "kind": {
+ "type": "string",
+ "enum": [
+ "EKSCluster"
+ ]
+ },
+ "metadata": {
+ "$ref": "#/$defs/Metadata"
+ },
+ "spec": {
+ "$ref": "#/$defs/Spec"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "apiVersion",
+ "kind",
+ "metadata",
+ "spec"
+ ],
"$defs": {
"Metadata": {
"type": "object",
@@ -6,6 +34,7 @@
"properties": {
"name": {
"type": "string",
+ "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.",
"minLength": 1,
"maxLength": 56
}
@@ -20,17 +49,20 @@
"properties": {
"distributionVersion": {
"type": "string",
+ "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.",
"minLength": 1
},
"region": {
- "$ref": "#/$defs/Types.AwsRegion"
+ "$ref": "#/$defs/Types.AwsRegion",
+ "description": "Defines in which AWS region the cluster and all the related resources will be created."
},
"tags": {
"$ref": "#/$defs/Types.AwsTags",
"description": "This map defines which will be the common tags that will be added to all the resources created on AWS."
},
"toolsConfiguration": {
- "$ref": "#/$defs/Spec.ToolsConfiguration"
+ "$ref": "#/$defs/Spec.ToolsConfiguration",
+ "description": "Configuration for tools used by furyctl, like Terraform."
},
"infrastructure": {
"$ref": "#/$defs/Spec.Infrastructure"
@@ -100,251 +132,148 @@
}
}
},
- "Spec.Distribution": {
+ "Spec.ToolsConfiguration": {
"type": "object",
"additionalProperties": false,
"properties": {
- "common": {
- "$ref": "#/$defs/Spec.Distribution.Common"
- },
- "modules": {
- "$ref": "#/$defs/Spec.Distribution.Modules"
- },
- "customPatches": {
- "$ref": "../public/spec-distribution-custompatches.json"
+ "terraform": {
+ "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform"
}
},
"required": [
- "modules"
- ],
- "if": {
- "allOf": [
- {
- "required": [
- "common"
- ]
- },
- {
- "properties": {
- "common": {
- "required": [
- "provider"
- ]
- }
- }
- },
- {
- "properties": {
- "common": {
- "properties": {
- "provider": {
- "required": [
- "type"
- ]
- }
- }
- }
- }
- },
- {
- "properties": {
- "common": {
- "properties": {
- "provider": {
- "properties": {
- "type": {
- "const": "eks"
- }
- }
- }
- }
- }
- }
- }
- ]
- },
- "then": {
- "properties": {
- "modules": {
- "required": [
- "aws"
- ]
- }
- }
- },
- "else": {
- "properties": {
- "modules": {
- "properties": {
- "aws": {
- "type": "null"
- }
- }
- }
- }
- }
+ "terraform"
+ ]
},
- "Spec.Distribution.Common": {
+ "Spec.ToolsConfiguration.Terraform": {
"type": "object",
"additionalProperties": false,
"properties": {
- "nodeSelector": {
- "$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for all the KFD modules"
- },
- "tolerations": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.KubeToleration"
- },
- "description": "The tolerations that will be added to the pods for all the KFD modules"
- },
- "provider": {
- "$ref": "#/$defs/Spec.Distribution.Common.Provider"
- },
- "relativeVendorPath": {
- "type": "string",
- "description": "The relative path to the vendor directory, does not need to be changed"
- },
- "registry": {
- "type": "string",
- "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too."
+ "state": {
+ "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State"
}
- }
+ },
+ "required": [
+ "state"
+ ]
},
- "Spec.Distribution.Common.Provider": {
+ "Spec.ToolsConfiguration.Terraform.State": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for storing the Terraform state of the cluster.",
"properties": {
- "type": {
- "type": "string",
- "description": "The type of the provider, must be EKS if specified"
+ "s3": {
+ "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3"
}
},
"required": [
- "type"
+ "s3"
]
},
- "Spec.Distribution.Modules": {
+ "Spec.ToolsConfiguration.Terraform.State.S3": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the S3 bucket used to store the Terraform state.",
"properties": {
- "auth": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth"
- },
- "aws": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Aws"
- },
- "dr": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Dr"
- },
- "ingress": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress"
- },
- "logging": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Logging"
- },
- "monitoring": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring"
+ "bucketName": {
+ "$ref": "#/$defs/Types.AwsS3BucketName",
+ "description": "This value defines which bucket will be used to store all the states."
},
- "tracing": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Tracing"
+ "keyPrefix": {
+ "$ref": "#/$defs/Types.AwsS3KeyPrefix",
+ "description": "This value defines which folder will be used to store all the states inside the bucket."
},
- "networking": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Networking"
+ "region": {
+ "$ref": "#/$defs/Types.AwsRegion",
+ "description": "This value defines in which region the bucket is located."
},
- "policy": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Policy"
+ "skipRegionValidation": {
+ "type": "boolean",
+ "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region."
}
},
"required": [
- "dr",
- "ingress",
- "logging",
- "policy"
+ "bucketName",
+ "keyPrefix",
+ "region"
]
},
- "Spec.Distribution.Modules.Auth": {
+ "Spec.Infrastructure": {
"type": "object",
"additionalProperties": false,
"properties": {
- "overrides": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides"
- },
- "provider": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider"
- },
- "baseDomain": {
- "type": "string",
- "description": "The base domain for the auth module"
- },
- "pomerium": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium"
+ "vpc": {
+ "$ref": "#/$defs/Spec.Infrastructure.Vpc"
},
- "dex": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex"
+ "vpn": {
+ "$ref": "#/$defs/Spec.Infrastructure.Vpn"
}
},
- "required": [
- "provider"
- ],
"allOf": [
{
"if": {
- "properties": {
- "provider": {
+ "allOf": [
+ {
"properties": {
- "type": {
- "const": "sso"
+ "vpc": {
+ "type": "null"
+ }
+ }
+ },
+ {
+ "not": {
+ "properties": {
+ "vpn": {
+ "type": "null"
+ }
}
}
}
- }
- },
- "then": {
- "required": [
- "dex",
- "pomerium",
- "baseDomain"
]
},
- "else": {
+ "then": {
"properties": {
- "dex": {
- "type": "null"
- },
- "pomerium": {
- "type": "null"
+ "vpn": {
+ "required": [
+ "vpcId"
+ ]
}
}
}
},
{
"if": {
- "properties": {
- "provider": {
- "properties": {
- "type": {
- "const": "basicAuth"
+ "allOf": [
+ {
+ "not": {
+ "properties": {
+ "vpc": {
+ "type": "null"
+ }
+ }
+ }
+ },
+ {
+ "not": {
+ "properties": {
+ "vpn": {
+ "properties": {
+ "vpcId": {
+ "type": "null"
+ }
+ }
+ }
}
}
}
- }
- },
- "then": {
- "properties": {
- "provider": {
- "required": [
- "basicAuth"
- ]
- }
- }
+ ]
},
- "else": {
+ "then": {
"properties": {
- "provider": {
- "basicAuth": {
- "type": "null"
+ "vpn": {
+ "properties": {
+ "vpcId": {
+ "type": "null"
+ }
}
}
}
@@ -352,304 +281,972 @@
}
]
},
- "Spec.Distribution.Modules.Auth.Dex": {
+ "Spec.Infrastructure.Vpc": {
"type": "object",
+ "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.",
"additionalProperties": false,
"properties": {
- "connectors": {
- "type": "array",
- "description": "The connectors for dex"
- },
- "additionalStaticClients": {
- "type": "array",
- "description": "The additional static clients for dex"
- },
- "expiry": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "signingKeys": {
- "type": "string",
- "description": "Dex signing key expiration time duration (default 6h)."
- },
- "idTokens": {
- "type": "string",
- "description": "Dex ID tokens expiration time duration (default 24h)."
- }
- }
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "network": {
+ "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network"
}
},
"required": [
- "connectors"
+ "network"
]
},
- "Spec.Distribution.Modules.Auth.Overrides": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "nodeSelector": {
- "$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the auth module"
- },
- "tolerations": {
- "type": [
- "array",
- "null"
- ],
- "items": {
- "$ref": "#/$defs/Types.KubeToleration"
- },
- "description": "The tolerations that will be added to the pods for the auth module"
- },
- "ingresses": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress"
- }
- }
- }
- },
- "Spec.Distribution.Modules.Auth.Overrides.Ingress": {
+ "Spec.Infrastructure.Vpc.Network": {
"type": "object",
"additionalProperties": false,
"properties": {
- "host": {
- "type": "string",
- "description": "The host of the ingress"
+ "cidr": {
+ "$ref": "#/$defs/Types.Cidr",
+ "description": "The network CIDR for the VPC that will be created"
},
- "ingressClass": {
- "type": "string",
- "description": "The ingress class of the ingress"
+ "subnetsCidrs": {
+ "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs"
}
},
"required": [
- "host",
- "ingressClass"
+ "cidr",
+ "subnetsCidrs"
]
},
- "Spec.Distribution.Modules.Auth.Pomerium": {
- "$ref": "../public/spec-distribution-modules-auth-pomerium.json"
- },
- "Spec.Distribution.Modules.Auth.Provider": {
+ "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": {
"type": "object",
+ "description": "Network CIDRS configuration for private and public subnets.",
"additionalProperties": false,
"properties": {
- "type": {
- "type": "string",
- "enum": [
- "none",
- "basicAuth",
- "sso"
- ],
- "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***"
+ "private": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.Cidr"
+ },
+ "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
},
- "basicAuth": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth"
+ "public": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.Cidr"
+ },
+ "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
}
},
"required": [
- "type"
+ "private",
+ "public"
]
},
- "Spec.Distribution.Modules.Auth.Provider.BasicAuth": {
+ "Spec.Infrastructure.Vpn": {
"type": "object",
+ "description": "Configuration for the VPN server instances.",
"additionalProperties": false,
"properties": {
- "username": {
+ "instances": {
+ "type": "integer",
+ "description": "The number of VPN server instances to create, `0` to skip the creation."
+ },
+ "port": {
+ "$ref": "#/$defs/Types.TcpPort",
+ "description": "The port where each OpenVPN server will listen for connections."
+ },
+ "instanceType": {
"type": "string",
- "description": "The username for the basic auth"
+ "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`."
},
- "password": {
+ "diskSize": {
+ "type": "integer",
+ "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB."
+ },
+ "operatorName": {
"type": "string",
- "description": "The password for the basic auth"
- }
- },
- "required": [
- "username",
- "password"
- ]
- },
- "Spec.Distribution.Modules.Aws": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "clusterAutoscaler": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler"
+ "description": "The username of the account to create in the bastion's operating system."
},
- "ebsCsiDriver": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "iamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
- }
- },
- "required": [
- "iamRoleArn"
- ]
+ "dhParamsBits": {
+ "type": "integer",
+ "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file."
},
- "loadBalancerController": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "iamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
- }
- },
- "required": [
- "iamRoleArn"
- ]
+ "vpnClientsSubnetCidr": {
+ "$ref": "#/$defs/Types.Cidr",
+ "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected."
},
- "ebsSnapshotController": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
+ "ssh": {
+ "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh"
},
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleOverrides"
+ "vpcId": {
+ "$ref": "#/$defs/Types.AwsVpcId",
+ "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted."
+ },
+ "bucketNamePrefix": {
+ "$ref": "#/$defs/Types.AwsS3BucketNamePrefix",
+ "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)."
+ },
+ "iamUserNameOverride": {
+ "$ref": "#/$defs/Types.AwsIamRoleName",
+ "description": "Overrides IAM user name for the VPN. Default is to use the cluster name."
}
},
"required": [
- "clusterAutoscaler",
- "ebsCsiDriver",
- "loadBalancerController",
- "overrides"
+ "ssh",
+ "vpnClientsSubnetCidr"
]
},
- "Spec.Distribution.Modules.Aws.ClusterAutoscaler": {
+ "Spec.Infrastructure.Vpn.Ssh": {
"type": "object",
"additionalProperties": false,
"properties": {
- "iamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
+ "publicKeys": {
+ "type": "array",
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#/$defs/Types.SshPubKey"
+ },
+ {
+ "$ref": "#/$defs/Types.FileRef"
+ }
+ ]
+ },
+ "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system."
},
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
+ "githubUsersName": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "minItems": 1,
+ "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user."
+ },
+ "allowedFromCidrs": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.Cidr"
+ },
+ "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source."
}
},
"required": [
- "iamRoleArn"
+ "allowedFromCidrs",
+ "githubUsersName"
]
},
- "Spec.Distribution.Modules.Dr": {
+ "Spec.Kubernetes": {
"type": "object",
+ "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.",
"additionalProperties": false,
"properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleOverrides"
+ "vpcId": {
+ "$ref": "#/$defs/Types.AwsVpcId",
+ "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created."
},
- "type": {
- "type": "string",
- "enum": [
- "none",
- "eks"
- ],
- "description": "The type of the DR, must be ***none*** or ***eks***"
+ "clusterIAMRoleNamePrefixOverride": {
+ "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
+ "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name."
},
- "velero": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
+ "workersIAMRoleNamePrefixOverride": {
+ "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
+ "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name."
+ },
+ "subnetIds": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.AwsSubnetId"
+ },
+ "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created."
+ },
+ "apiServer": {
+ "$ref": "#/$defs/Spec.Kubernetes.APIServer"
+ },
+ "serviceIpV4Cidr": {
+ "$ref": "#/$defs/Types.Cidr",
+ "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services."
+ },
+ "nodeAllowedSshPublicKey": {
+ "anyOf": [
+ {
+ "$ref": "#/$defs/Types.AwsSshPubKey"
+ },
+ {
+ "$ref": "#/$defs/Types.FileRef"
+ }
+ ],
+ "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file."
+ },
+ "nodePoolsLaunchKind": {
+ "type": "string",
+ "enum": [
+ "launch_configurations",
+ "launch_templates",
+ "both"
+ ],
+ "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
+ },
+ "nodePoolGlobalAmiType": {
+ "type": "string",
+ "enum": [
+ "alinux2",
+ "alinux2023"
+ ],
+ "description": "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool."
+ },
+ "logRetentionDays": {
+ "type": "integer",
+ "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.",
+ "enum": [
+ 0,
+ 1,
+ 3,
+ 5,
+ 7,
+ 14,
+ 30,
+ 60,
+ 90,
+ 120,
+ 150,
+ 180,
+ 365,
+ 400,
+ 545,
+ 731,
+ 1096,
+ 1827,
+ 2192,
+ 2557,
+ 2922,
+ 3288,
+ 3653
+ ]
+ },
+ "logsTypes": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "api",
+ "audit",
+ "authenticator",
+ "controllerManager",
+ "scheduler"
+ ]
+ },
+ "minItems": 0,
+ "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types."
+ },
+ "nodePools": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool"
+ }
+ },
+ "awsAuth": {
+ "$ref": "#/$defs/Spec.Kubernetes.AwsAuth"
+ }
+ },
+ "required": [
+ "apiServer",
+ "nodeAllowedSshPublicKey",
+ "nodePools",
+ "nodePoolsLaunchKind",
+ "nodePoolGlobalAmiType"
+ ]
+ },
+ "Spec.Kubernetes.APIServer": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "privateAccess": {
+ "type": "boolean",
+ "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default it `true`."
+ },
+ "privateAccessCidrs": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.Cidr"
+ },
+ "minItems": 0,
+ "description": "The network CIDRs from the private subnets that will be allowed access the Kubernetes API server."
+ },
+ "publicAccessCidrs": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.Cidr"
+ },
+ "minItems": 0,
+ "description": "The network CIDRs from the public subnets that will be allowed access the Kubernetes API server."
+ },
+ "publicAccess": {
+ "type": "boolean",
+ "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`."
+ }
+ },
+ "required": [
+ "privateAccess",
+ "publicAccess"
+ ]
+ },
+ "Spec.Kubernetes.NodePool": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.",
+ "properties": {
+ "type": {
+ "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.",
+ "type": "string",
+ "enum": [
+ "eks-managed",
+ "self-managed"
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the node pool."
+ },
+ "ami": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami"
+ },
+ "containerRuntime": {
+ "type": "string",
+ "enum": [
+ "docker",
+ "containerd"
+ ],
+ "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`."
+ },
+ "size": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size"
+ },
+ "instance": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance"
+ },
+ "attachedTargetGroups": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.AwsArn"
+ },
+ "description": "This optional array defines additional target groups to attach to the instances in the node pool."
+ },
+ "labels": {
+ "$ref": "#/$defs/Types.KubeLabels",
+ "description": "Kubernetes labels that will be added to the nodes."
+ },
+ "taints": {
+ "$ref": "#/$defs/Types.KubeTaints",
+ "description": "Kubernetes taints that will be added to the nodes."
+ },
+ "tags": {
+ "$ref": "#/$defs/Types.AwsTags",
+ "description": "AWS tags that will be added to the ASG and EC2 instances."
+ },
+ "subnetIds": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.AwsSubnetId"
+ },
+ "description": "Optional list of subnet IDs where to create the nodes."
+ },
+ "additionalFirewallRules": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules"
+ }
+ },
+ "required": [
+ "instance",
+ "name",
+ "size",
+ "type"
+ ],
+ "if": {
+ "allOf": [
+ {
+ "properties": {
+ "type": {
+ "enum": [
+ "eks-managed"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ "then": {
+ "properties": {
+ "ami": {
+ "properties": {
+ "id": {
+ "type": "null"
+ },
+ "owner": {
+ "type": "null"
+ }
+ }
+ }
+ }
+ }
+ },
+ "Spec.Kubernetes.NodePool.Ami": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for customize the Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "The ID of the AMI to use for the nodes, must be set toghether with the `owner` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`."
+ },
+ "owner": {
+ "type": "string",
+ "description": "The owner of the AMI to use for the nodes, must be set toghether with the `id` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`."
+ },
+ "type": {
+ "type": "string",
+ "description": "The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time than `ami.id` and `ami.owner`.",
+ "enum": [
+ "alinux2",
+ "alinux2023"
+ ]
+ }
+ },
+ "oneOf": [
+ {
+ "allOf": [
+ {
+ "required": [
+ "id",
+ "owner"
+ ]
+ },
+ {
+ "not": {
+ "required": [
+ "type"
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "allOf": [
+ {
+ "required": [
+ "type"
+ ]
+ },
+ {
+ "not": {
+ "anyOf": [
+ {
+ "required": [
+ "id"
+ ]
+ },
+ {
+ "required": [
+ "owner"
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "Spec.Kubernetes.NodePool.Instance": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for the instances that will be used in the node pool.",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The instance type to use for the nodes."
+ },
+ "spot": {
+ "type": "boolean",
+ "description": "If `true`, the nodes will be created as spot instances. Default is `false`."
+ },
+ "volumeSize": {
+ "type": "integer",
+ "description": "The size of the disk in GB."
+ },
+ "volumeType": {
+ "type": "string",
+ "description": "Volume type for the instance disk. Default is `gp2`.",
+ "enum": [
+ "gp2",
+ "gp3",
+ "io1",
+ "standard"
+ ]
+ },
+ "maxPods": {
+ "type": "integer",
+ "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "Spec.Kubernetes.NodePool.Size": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "min": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "The minimum number of nodes in the node pool."
+ },
+ "max": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "The maximum number of nodes in the node pool."
+ }
+ },
+ "required": [
+ "max",
+ "min"
+ ]
+ },
+ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Optional additional firewall rules that will be attached to the nodes.",
+ "properties": {
+ "cidrBlocks": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock"
+ },
+ "minItems": 1,
+ "maxItems": 1,
+ "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details."
+ },
+ "sourceSecurityGroupId": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId"
+ },
+ "minItems": 1
+ },
+ "self": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self"
+ },
+ "minItems": 1
+ }
+ }
+ },
+ "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.",
+ "enum": [
+ "ingress",
+ "egress"
+ ]
+ },
+ "tags": {
+ "$ref": "#/$defs/Types.AwsTags",
+ "description": "Additional AWS tags for the Firewall rule."
+ },
+ "cidrBlocks": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.Cidr"
+ },
+ "minItems": 1
+ },
+ "protocol": {
+ "$ref": "#/$defs/Types.AwsIpProtocol"
+ },
+ "ports": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
+ }
+ },
+ "required": [
+ "cidrBlocks",
+ "name",
+ "ports",
+ "protocol",
+ "type"
+ ]
+ },
+ "Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name for the additional Firewall rule Security Group."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "ingress",
+ "egress"
+ ],
+ "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic."
+ },
+ "tags": {
+ "$ref": "#/$defs/Types.AwsTags",
+ "description": "Additional AWS tags for the Firewall rule."
+ },
+ "sourceSecurityGroupId": {
+ "type": "string",
+ "description": "The source security group ID."
+ },
+ "protocol": {
+ "$ref": "#/$defs/Types.AwsIpProtocol",
+ "description": "The protocol of the Firewall rule."
+ },
+ "ports": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
+ }
+ },
+ "required": [
+ "sourceSecurityGroupId",
+ "name",
+ "ports",
+ "protocol",
+ "type"
+ ]
+ },
+ "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the Firewall rule."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "ingress",
+ "egress"
+ ],
+ "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic."
+ },
+ "tags": {
+ "$ref": "#/$defs/Types.AwsTags",
+ "description": "Additional AWS tags for the Firewall rule."
+ },
+ "self": {
+ "type": "boolean",
+ "description": "If `true`, the source will be the security group itself."
+ },
+ "protocol": {
+ "$ref": "#/$defs/Types.AwsIpProtocol",
+ "description": "The protocol of the Firewall rule."
+ },
+ "ports": {
+ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
+ }
+ },
+ "required": [
+ "self",
+ "name",
+ "ports",
+ "protocol",
+ "type"
+ ]
+ },
+ "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": {
+ "type": "object",
+ "description": "Port range for the Firewall Rule.",
+ "additionalProperties": false,
+ "properties": {
+ "from": {
+ "$ref": "#/$defs/Types.TcpPort"
+ },
+ "to": {
+ "$ref": "#/$defs/Types.TcpPort"
+ }
+ },
+ "required": [
+ "from",
+ "to"
+ ]
+ },
+ "Spec.Kubernetes.AwsAuth": {
+ "type": "object",
+ "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html",
+ "additionalProperties": false,
+ "properties": {
+ "additionalAccounts": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap."
+ },
+ "users": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User"
+ },
+ "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap."
+ },
+ "roles": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role"
+ },
+ "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap."
+ }
+ }
+ },
+ "Spec.Kubernetes.AwsAuth.Role": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string"
+ },
+ "groups": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "rolearn": {
+ "$ref": "#/$defs/Types.AwsArn"
+ }
+ },
+ "required": [
+ "groups",
+ "rolearn",
+ "username"
+ ]
+ },
+ "Spec.Kubernetes.AwsAuth.User": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string"
+ },
+ "groups": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "userarn": {
+ "$ref": "#/$defs/Types.AwsArn"
+ }
+ },
+ "required": [
+ "groups",
+ "userarn",
+ "username"
+ ]
+ },
+ "Spec.Distribution": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "common": {
+ "$ref": "#/$defs/Spec.Distribution.Common"
+ },
+ "modules": {
+ "$ref": "#/$defs/Spec.Distribution.Modules"
+ },
+ "customPatches": {
+ "$ref": "../public/spec-distribution-custompatches.json"
+ }
+ },
+ "required": [
+ "modules"
+ ],
+ "if": {
+ "allOf": [
+ {
+ "required": [
+ "common"
+ ]
+ },
+ {
+ "properties": {
+ "common": {
+ "required": [
+ "provider"
+ ]
+ }
+ }
+ },
+ {
+ "properties": {
+ "common": {
+ "properties": {
+ "provider": {
+ "required": [
+ "type"
+ ]
+ }
+ }
+ }
+ }
+ },
+ {
+ "properties": {
+ "common": {
+ "properties": {
+ "provider": {
+ "properties": {
+ "type": {
+ "const": "eks"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ "then": {
+ "properties": {
+ "modules": {
+ "required": [
+ "aws"
+ ]
+ }
}
},
- "required": [
- "type"
- ],
- "if": {
+ "else": {
"properties": {
- "type": {
- "const": "eks"
+ "modules": {
+ "properties": {
+ "aws": {
+ "type": "null"
+ }
+ }
}
}
- },
- "then": {
- "required": [
- "type",
- "velero"
- ]
}
},
- "Spec.Distribution.Modules.Dr.Velero": {
+ "Spec.Distribution.Common": {
"type": "object",
"additionalProperties": false,
+ "description": "Common configuration for all the distribution modules.",
"properties": {
- "schedules": {
- "type": "object",
- "additionalProperties": false,
- "description": "Configuration for Velero's backup schedules.",
- "properties": {
- "install": {
- "type": "boolean",
- "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`."
- },
- "cron": {
- "type": "object",
- "additionalProperties": false,
- "description": "Configuration for Velero's schedules cron.",
- "properties": {
- "manifests": {
- "type": "string",
- "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
- },
- "full": {
- "type": "string",
- "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
- }
- }
- },
- "ttl": {
- "type": "string",
- "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
- }
- }
+ "nodeSelector": {
+ "$ref": "#/$defs/Types.KubeNodeSelector",
+ "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`."
},
- "eks": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks"
+ "tolerations": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.KubeToleration"
+ },
+ "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```"
},
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "provider": {
+ "$ref": "#/$defs/Spec.Distribution.Common.Provider"
+ },
+ "relativeVendorPath": {
+ "type": "string",
+ "description": "The relative path to the vendor directory, does not need to be changed."
+ },
+ "registry": {
+ "type": "string",
+ "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too."
+ }
+ }
+ },
+ "Spec.Distribution.Common.Provider": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The provider type. Don't set. FOR INTERNAL USE ONLY."
}
},
"required": [
- "eks"
+ "type"
]
},
- "Spec.Distribution.Modules.Dr.Velero.Eks": {
+ "Spec.Distribution.Modules": {
+ "type": "object",
"additionalProperties": false,
"properties": {
- "bucketName": {
- "$ref": "#/$defs/Types.AwsS3BucketName",
- "maxLength": 49,
- "description": "The name of the velero bucket"
+ "auth": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth"
},
- "iamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
+ "aws": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Aws"
},
- "region": {
- "$ref": "#/$defs/Types.AwsRegion",
- "description": "The region where the velero bucket is located"
+ "dr": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Dr"
+ },
+ "ingress": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress"
+ },
+ "logging": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Logging"
+ },
+ "monitoring": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring"
+ },
+ "tracing": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Tracing"
+ },
+ "networking": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Networking"
+ },
+ "policy": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Policy"
}
},
"required": [
- "iamRoleArn",
- "region",
- "bucketName"
- ],
- "type": "object"
+ "dr",
+ "ingress",
+ "logging",
+ "policy"
+ ]
},
"Spec.Distribution.Modules.Ingress": {
+ "type": "object",
"additionalProperties": false,
+ "properties": {
+ "overrides": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides"
+ },
+ "baseDomain": {
+ "type": "string",
+ "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone."
+ },
+ "nginx": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx",
+ "description": "Configurations for the Ingress nginx controller package."
+ },
+ "certManager": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager",
+ "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses."
+ },
+ "dns": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS"
+ },
+ "forecastle": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle"
+ },
+ "externalDns": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS"
+ }
+ },
+ "required": [
+ "certManager",
+ "externalDns",
+ "baseDomain",
+ "nginx"
+ ],
"allOf": [
{
"if": {
@@ -711,205 +1308,50 @@
"properties": {
"provider": {
"const": "certManager"
- }
- }
- }
- }
- }
- }
- },
- "then": {
- "required": [
- "certManager"
- ]
- }
- }
- ],
- "properties": {
- "baseDomain": {
- "type": "string",
- "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone"
- },
- "certManager": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager"
- },
- "dns": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS"
- },
- "externalDns": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS"
- },
- "forecastle": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle"
- },
- "nginx": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx",
- "description": "Configurations for the nginx ingress controller module"
- },
- "overrides": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides"
- }
- },
- "required": [
- "certManager",
- "externalDns",
- "baseDomain",
- "nginx"
- ],
- "type": "object"
- },
- "Spec.Distribution.Modules.Ingress.CertManager": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "clusterIssuer": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer"
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- },
- "required": [
- "clusterIssuer"
- ]
- },
- "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": {
- "additionalProperties": false,
- "oneOf": [
- {
- "required": [
- "type"
- ]
- },
- {
- "required": [
- "solvers"
- ]
- }
- ],
- "properties": {
- "email": {
- "type": "string",
- "format": "email",
- "description": "The email of the cluster issuer"
- },
- "name": {
- "type": "string",
- "description": "The name of the cluster issuer"
- },
- "route53": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53"
- },
- "solvers": {
- "type": "array",
- "description": "The custom solvers configurations"
- },
- "type": {
- "type": "string",
- "enum": [
- "dns01",
- "http01"
- ],
- "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***"
- }
- },
- "required": [
- "route53",
- "name",
- "email"
- ],
- "type": "object"
- },
- "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "iamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
- },
- "region": {
- "$ref": "#/$defs/Types.AwsRegion"
- },
- "hostedZoneId": {
- "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "certManager"
+ ]
+ }
}
- },
- "required": [
- "hostedZoneId",
- "iamRoleArn",
- "region"
]
},
- "Spec.Distribution.Modules.Ingress.DNS": {
+ "Spec.Distribution.Modules.Ingress.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Ingress module.",
"properties": {
- "public": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public"
+ "ingresses": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
},
- "private": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private"
+ "nodeSelector": {
+ "$ref": "#/$defs/Types.KubeNodeSelector",
+ "description": "Set to override the node selector used to place the pods of the Ingress module."
},
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "tolerations": {
+ "type": "array",
+ "items": {
+ "$ref": "#/$defs/Types.KubeToleration"
+ },
+ "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
}
}
},
- "Spec.Distribution.Modules.Ingress.DNS.Private": {
- "additionalProperties": false,
- "properties": {
- "create": {
- "type": "boolean",
- "description": "If true, the private hosted zone will be created"
- },
- "name": {
- "type": "string",
- "description": "The name of the private hosted zone"
- },
- "vpcId": {
- "type": "string"
- }
- },
- "required": [
- "vpcId",
- "name",
- "create"
- ],
- "type": "object"
- },
- "Spec.Distribution.Modules.Ingress.DNS.Public": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "name": {
- "type": "string",
- "description": "The name of the public hosted zone"
- },
- "create": {
- "type": "boolean",
- "description": "If true, the public hosted zone will be created"
- }
- },
- "required": [
- "name",
- "create"
- ]
- },
- "Spec.Distribution.Modules.Ingress.ExternalDNS": {
+ "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": {
"type": "object",
"additionalProperties": false,
"properties": {
- "privateIamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
- },
- "publicIamRoleArn": {
- "$ref": "#/$defs/Types.AwsArn"
+ "forecastle": {
+ "$ref": "#/$defs/Types.FuryModuleOverridesIngress"
}
- },
- "required": [
- "privateIamRoleArn",
- "publicIamRoleArn"
- ]
+ }
},
"Spec.Distribution.Modules.Ingress.Forecastle": {
"type": "object",
@@ -931,7 +1373,7 @@
"single",
"dual"
],
- "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***"
+ "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
},
"tls": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -955,7 +1397,7 @@
"secret",
"none"
],
- "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***"
+ "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`."
},
"secret": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret"
@@ -980,16 +1422,19 @@
"Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": {
"type": "object",
"additionalProperties": false,
+ "description": "Kubernetes TLS secret for the ingresses TLS certificate.",
"properties": {
"cert": {
"type": "string",
- "description": "The certificate file content or you can use the file notation to get the content from a file"
+ "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"key": {
- "type": "string"
+ "type": "string",
+ "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"ca": {
- "type": "string"
+ "type": "string",
+ "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
}
},
"required": [
@@ -998,38 +1443,131 @@
"key"
]
},
- "Spec.Distribution.Modules.Ingress.Overrides": {
+ "Spec.Distribution.Modules.Ingress.CertManager": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.",
"properties": {
- "ingresses": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
+ "clusterIssuer": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer"
},
- "nodeSelector": {
- "$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the ingress module"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ }
+ },
+ "required": [
+ "clusterIssuer"
+ ]
+ },
+ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the clusterIssuer."
},
- "tolerations": {
+ "email": {
+ "type": "string",
+ "format": "email",
+ "description": "The email address to use during the certificate issuing process."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "dns01",
+ "http01"
+ ],
+ "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge."
+ },
+ "solvers": {
"type": "array",
- "items": {
- "$ref": "#/$defs/Types.KubeToleration"
- },
- "description": "The tolerations that will be added to the pods for the ingress module"
+ "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field."
+ },
+ "route53": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53"
}
- }
+ },
+ "required": [
+ "route53",
+ "name",
+ "email"
+ ],
+ "oneOf": [
+ {
+ "required": [
+ "type"
+ ]
+ },
+ {
+ "required": [
+ "solvers"
+ ]
+ }
+ ]
},
- "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": {
+ "Spec.Distribution.Modules.Ingress.DNS": {
"type": "object",
+ "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.",
"additionalProperties": false,
"properties": {
- "forecastle": {
- "$ref": "#/$defs/Types.FuryModuleOverridesIngress"
+ "public": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public"
+ },
+ "private": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private"
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
}
},
+ "Spec.Distribution.Modules.Ingress.DNS.Public": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the public hosted zone."
+ },
+ "create": {
+ "type": "boolean",
+ "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead."
+ }
+ },
+ "required": [
+ "name",
+ "create"
+ ]
+ },
+ "Spec.Distribution.Modules.Ingress.DNS.Private": {
+ "type": "object",
+ "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`."
+ },
+ "create": {
+ "type": "boolean",
+ "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead."
+ },
+ "vpcId": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "vpcId",
+ "name",
+ "create"
+ ]
+ },
"Spec.Distribution.Modules.Logging": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1042,7 +1580,7 @@
"loki",
"customOutputs"
],
- "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+ "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
},
"opensearch": {
"$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -1081,6 +1619,20 @@
]
}
},
+ {
+ "if": {
+ "properties": {
+ "type": {
+ "const": "loki"
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "loki"
+ ]
+ }
+ },
{
"if": {
"properties": {
@@ -1097,8 +1649,36 @@
}
]
},
+ "Spec.Distribution.Modules.Logging.Opensearch": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "single",
+ "triple"
+ ],
+ "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment."
+ },
+ "resources": {
+ "$ref": "#/$defs/Types.KubeResources"
+ },
+ "storageSize": {
+ "type": "string",
+ "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`."
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
"Spec.Distribution.Modules.Logging.Cerebro": {
"type": "object",
+ "description": "DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0.",
"additionalProperties": false,
"properties": {
"overrides": {
@@ -1106,61 +1686,42 @@
}
}
},
- "Spec.Distribution.Modules.Logging.CustomOutputs": {
- "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.",
+ "Spec.Distribution.Modules.Logging.Minio": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Logging's MinIO deployment.",
"properties": {
- "audit": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
- },
- "events": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
- },
- "infra": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
- },
- "ingressNginx": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
- },
- "kubernetes": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
- },
- "systemdCommon": {
+ "storageSize": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
- "systemdEtcd": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "rootUser": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string",
+ "description": "The username for the default MinIO root user."
+ },
+ "password": {
+ "type": "string",
+ "description": "The password for the default MinIO root user."
+ }
+ }
},
- "errors": {
- "type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "audit",
- "events",
- "infra",
- "ingressNginx",
- "kubernetes",
- "systemdCommon",
- "systemdEtcd",
- "errors"
- ]
+ }
},
"Spec.Distribution.Modules.Logging.Loki": {
"type": "object",
+ "description": "Configuration for the Loki package.",
"additionalProperties": false,
"properties": {
"backend": {
"type": "string",
+ "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.",
"enum": [
"minio",
"externalEndpoint"
@@ -1169,101 +1730,106 @@
"externalEndpoint": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Loki's external storage backend.",
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the loki external endpoint"
+ "description": "External S3-compatible endpoint for Loki's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the loki external endpoint will be insecure"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the loki external endpoint"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the loki external endpoint"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the loki external endpoint"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
+ "tsdbStartDate": {
+ "type": "string",
+ "format": "date",
+ "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`."
+ },
"resources": {
"$ref": "#/$defs/Types.KubeResources"
}
- }
+ },
+ "required": [
+ "tsdbStartDate"
+ ]
},
- "Spec.Distribution.Modules.Logging.Minio": {
+ "Spec.Distribution.Modules.Logging.Operator": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging Operator.",
"properties": {
- "storageSize": {
- "type": "string",
- "description": "The PVC size for each minio disk, 6 disks total"
- },
- "rootUser": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "username": {
- "type": "string",
- "description": "The username of the minio root user"
- },
- "password": {
- "type": "string",
- "description": "The password of the minio root user"
- }
- }
- },
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
}
},
- "Spec.Distribution.Modules.Logging.Opensearch": {
+ "Spec.Distribution.Modules.Logging.CustomOutputs": {
+ "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.",
"type": "object",
"additionalProperties": false,
"properties": {
- "type": {
+ "audit": {
"type": "string",
- "enum": [
- "single",
- "triple"
- ],
- "description": "The type of the opensearch, must be ***single*** or ***triple***"
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
- "resources": {
- "$ref": "#/$defs/Types.KubeResources"
+ "events": {
+ "type": "string",
+ "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
- "storageSize": {
+ "infra": {
"type": "string",
- "description": "The storage size for the opensearch pods"
+ "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "ingressNginx": {
+ "type": "string",
+ "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
+ },
+ "kubernetes": {
+ "type": "string",
+ "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
+ },
+ "systemdCommon": {
+ "type": "string",
+ "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
+ },
+ "systemdEtcd": {
+ "type": "string",
+ "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
+ },
+ "errors": {
+ "type": "string",
+ "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
}
},
"required": [
- "type"
+ "audit",
+ "events",
+ "infra",
+ "ingressNginx",
+ "kubernetes",
+ "systemdCommon",
+ "systemdEtcd",
+ "errors"
]
},
- "Spec.Distribution.Modules.Logging.Operator": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
"Spec.Distribution.Modules.Monitoring": {
"type": "object",
"additionalProperties": false,
- "description": "configuration for the Monitoring module components",
+ "description": "Configuration for the Monitoring module.",
"properties": {
"type": {
"type": "string",
@@ -1273,7 +1839,7 @@
"prometheusAgent",
"mimir"
],
- "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1310,133 +1876,6 @@
"type"
]
},
- "Spec.Distribution.Modules.Monitoring.AlertManager": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "deadManSwitchWebhookUrl": {
- "type": "string",
- "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io"
- },
- "installDefaultRules": {
- "type": "boolean",
- "description": "If true, the default rules will be installed"
- },
- "slackWebhookUrl": {
- "type": "string",
- "description": "The slack webhook url to send alerts"
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.BlackboxExporter": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.Grafana": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "usersRoleAttributePath": {
- "type": "string",
- "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)."
- },
- "basicAuthIngress": {
- "type": "boolean",
- "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. It's intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled."
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.Mimir": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "retentionTime": {
- "type": "string",
- "description": "The retention time for the mimir pods"
- },
- "backend": {
- "type": "string",
- "enum": [
- "minio",
- "externalEndpoint"
- ],
- "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***"
- },
- "externalEndpoint": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "endpoint": {
- "type": "string",
- "description": "The endpoint of the external mimir backend"
- },
- "insecure": {
- "type": "boolean",
- "description": "If true, the external mimir backend will not use tls"
- },
- "secretAccessKey": {
- "type": "string",
- "description": "The secret access key of the external mimir backend"
- },
- "accessKeyId": {
- "type": "string",
- "description": "The access key id of the external mimir backend"
- },
- "bucketName": {
- "type": "string",
- "description": "The bucket name of the external mimir backend"
- }
- }
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.Minio": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "storageSize": {
- "type": "string",
- "description": "The storage size for the minio pods"
- },
- "rootUser": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "username": {
- "type": "string",
- "description": "The username for the minio root user"
- },
- "password": {
- "type": "string",
- "description": "The password for the minio root user"
- }
- }
- },
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
"Spec.Distribution.Modules.Monitoring.Prometheus": {
"type": "object",
"additionalProperties": false,
@@ -1446,259 +1885,111 @@
},
"retentionTime": {
"type": "string",
- "description": "The retention time for the k8s Prometheus instance."
+ "description": "The retention time for the `k8s` Prometheus instance."
},
"retentionSize": {
"type": "string",
- "description": "The retention size for the k8s Prometheus instance."
+ "description": "The retention size for the `k8s` Prometheus instance."
},
"storageSize": {
"type": "string",
- "description": "The storage size for the k8s Prometheus instance."
- },
- "remoteWrite": {
- "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).",
- "type": "array",
- "items": {
- "type": "object"
- }
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.PrometheusAgent": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "resources": {
- "$ref": "#/$defs/Types.KubeResources"
+ "description": "The storage size for the `k8s` Prometheus instance."
},
"remoteWrite": {
"description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).",
- "type": "array",
- "items": {
- "type": "object"
- }
- }
- }
- },
- "Spec.Distribution.Modules.Monitoring.X509Exporter": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- }
- }
- },
- "Spec.Distribution.Modules.Networking": {
- "additionalProperties": false,
- "properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
- },
- "tigeraOperator": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator"
- },
- "type": {
- "type": "string",
- "enum": [
- "none"
- ]
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
}
- },
- "type": "object"
+ }
},
- "Spec.Distribution.Modules.Networking.TigeraOperator": {
+ "Spec.Distribution.Modules.Monitoring.PrometheusAgent": {
"type": "object",
"additionalProperties": false,
"properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "resources": {
+ "$ref": "#/$defs/Types.KubeResources"
+ },
+ "remoteWrite": {
+ "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
}
}
},
- "Spec.Distribution.Modules.Policy": {
+ "Spec.Distribution.Modules.Monitoring.AlertManager": {
"type": "object",
"additionalProperties": false,
"properties": {
- "overrides": {
- "$ref": "#/$defs/Types.FuryModuleOverrides"
- },
- "type": {
+ "deadManSwitchWebhookUrl": {
"type": "string",
- "enum": [
- "none",
- "gatekeeper",
- "kyverno"
- ],
- "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***"
- },
- "gatekeeper": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper"
+ "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
},
- "kyverno": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno"
- }
- },
- "required": [
- "type"
- ],
- "allOf": [
- {
- "if": {
- "properties": {
- "type": {
- "const": "gatekeeper"
- }
- }
- },
- "then": {
- "required": [
- "gatekeeper"
- ]
- }
+ "installDefaultRules": {
+ "type": "boolean",
+ "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution."
},
- {
- "if": {
- "properties": {
- "type": {
- "const": "kyverno"
- }
- }
- },
- "then": {
- "required": [
- "kyverno"
- ]
- }
+ "slackWebhookUrl": {
+ "type": "string",
+ "description": "The Slack webhook URL where to send the infrastructural and workload alerts to."
}
- ]
+ }
},
- "Spec.Distribution.Modules.Policy.Gatekeeper": {
+ "Spec.Distribution.Modules.Monitoring.Grafana": {
"type": "object",
"additionalProperties": false,
"properties": {
- "additionalExcludedNamespaces": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them."
- },
- "enforcementAction": {
+ "usersRoleAttributePath": {
"type": "string",
- "enum": [
- "deny",
- "dryrun",
- "warn"
- ],
- "description": "The enforcement action to use for the gatekeeper module"
+ "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)."
},
- "installDefaultPolicies": {
+ "basicAuthIngress": {
"type": "boolean",
- "description": "If true, the default policies will be installed"
+ "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. It's intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "enforcementAction",
- "installDefaultPolicies"
- ]
+ }
},
- "Spec.Distribution.Modules.Policy.Kyverno": {
+ "Spec.Distribution.Modules.Monitoring.BlackboxExporter": {
"type": "object",
"additionalProperties": false,
"properties": {
- "additionalExcludedNamespaces": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them."
- },
- "validationFailureAction": {
- "type": "string",
- "enum": [
- "Audit",
- "Enforce"
- ],
- "description": "The validation failure action to use for the kyverno module"
- },
- "installDefaultPolicies": {
- "type": "boolean",
- "description": "If true, the default policies will be installed"
- },
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "validationFailureAction",
- "installDefaultPolicies"
- ]
+ }
},
- "Spec.Distribution.Modules.Tracing": {
+ "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": {
"type": "object",
"additionalProperties": false,
"properties": {
"overrides": {
- "$ref": "#/$defs/Types.FuryModuleOverrides"
- },
- "type": {
- "type": "string",
- "enum": [
- "none",
- "tempo"
- ],
- "description": "The type of tracing to use, either ***none*** or ***tempo***"
- },
- "tempo": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo"
- },
- "minio": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio"
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "type"
- ]
+ }
},
- "Spec.Distribution.Modules.Tracing.Minio": {
+ "Spec.Distribution.Modules.Monitoring.X509Exporter": {
"type": "object",
"additionalProperties": false,
"properties": {
- "storageSize": {
- "type": "string",
- "description": "The storage size for the minio pods"
- },
- "rootUser": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "username": {
- "type": "string",
- "description": "The username for the minio root user"
- },
- "password": {
- "type": "string",
- "description": "The password for the minio root user"
- }
- }
- },
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
}
},
- "Spec.Distribution.Modules.Tracing.Tempo": {
+ "Spec.Distribution.Modules.Monitoring.Mimir": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Mimir package.",
"properties": {
"retentionTime": {
"type": "string",
- "description": "The retention time for the tempo pods"
+ "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
},
"backend": {
"type": "string",
@@ -1706,844 +1997,758 @@
"minio",
"externalEndpoint"
],
- "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***"
+ "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
},
"externalEndpoint": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Mimir's external storage backend.",
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the external tempo backend"
+ "description": "The external S3-compatible endpoint for Mimir's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the external tempo backend will not use tls"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the external tempo backend"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the external tempo backend"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the external tempo backend"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- }
- },
- "Spec.Infrastructure": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "vpc": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpc",
- "description": "This key defines the VPC that will be created in AWS"
- },
- "vpn": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpn",
- "description": "This section defines the creation of VPN bastions"
- }
- },
- "allOf": [
- {
- "if": {
- "allOf": [
- {
- "properties": {
- "vpc": {
- "type": "null"
- }
- }
- },
- {
- "not": {
- "properties": {
- "vpn": {
- "type": "null"
- }
- }
- }
- }
- ]
- },
- "then": {
- "properties": {
- "vpn": {
- "required": [
- "vpcId"
- ]
- }
- }
- }
- },
- {
- "if": {
- "allOf": [
- {
- "not": {
- "properties": {
- "vpc": {
- "type": "null"
- }
- }
- }
- },
- {
- "not": {
- "properties": {
- "vpn": {
- "properties": {
- "vpcId": {
- "type": "null"
- }
- }
- }
- }
- }
- }
- ]
- },
- "then": {
- "properties": {
- "vpn": {
- "properties": {
- "vpcId": {
- "type": "null"
- }
- }
- }
- }
- }
- }
- ]
- },
- "Spec.Infrastructure.Vpc": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "network": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network"
- }
- },
- "required": [
- "network"
- ]
+ }
},
- "Spec.Infrastructure.Vpc.Network": {
+ "Spec.Distribution.Modules.Monitoring.Minio": {
"type": "object",
+ "description": "Configuration for Monitoring's MinIO deployment.",
"additionalProperties": false,
"properties": {
- "cidr": {
- "$ref": "#/$defs/Types.Cidr",
- "description": "This is the CIDR of the VPC that will be created"
+ "storageSize": {
+ "type": "string",
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
- "subnetsCidrs": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs"
+ "rootUser": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string",
+ "description": "The username for the default MinIO root user."
+ },
+ "password": {
+ "type": "string",
+ "description": "The password for the default MinIO root user."
+ }
+ }
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "cidr",
- "subnetsCidrs"
- ]
+ }
},
- "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": {
+ "Spec.Distribution.Modules.Tracing": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Tracing module.",
"properties": {
- "private": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.Cidr"
- },
- "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
},
- "public": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.Cidr"
- },
- "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
+ "type": {
+ "type": "string",
+ "enum": [
+ "none",
+ "tempo"
+ ],
+ "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`."
+ },
+ "tempo": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo"
+ },
+ "minio": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio"
}
},
"required": [
- "private",
- "public"
+ "type"
]
},
- "Spec.Infrastructure.Vpn": {
+ "Spec.Distribution.Modules.Tracing.Tempo": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Tempo package.",
"properties": {
- "instances": {
- "type": "integer",
- "description": "The number of instances to create, 0 to skip the creation"
- },
- "port": {
- "$ref": "#/$defs/Types.TcpPort",
- "description": "The port used by the OpenVPN server"
- },
- "instanceType": {
+ "retentionTime": {
"type": "string",
- "description": "The size of the AWS EC2 instance"
+ "description": "The retention time for the traces stored in Tempo."
},
- "diskSize": {
- "type": "integer",
- "description": "The size of the disk in GB"
- },
- "operatorName": {
+ "backend": {
"type": "string",
- "description": "The username of the account to create in the bastion's operating system"
- },
- "dhParamsBits": {
- "type": "integer",
- "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file"
- },
- "vpnClientsSubnetCidr": {
- "$ref": "#/$defs/Types.Cidr",
- "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected"
- },
- "ssh": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh"
- },
- "vpcId": {
- "$ref": "#/$defs/Types.AwsVpcId",
- "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted"
+ "enum": [
+ "minio",
+ "externalEndpoint"
+ ],
+ "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
},
- "bucketNamePrefix": {
- "$ref": "#/$defs/Types.AwsS3BucketNamePrefix",
- "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states"
+ "externalEndpoint": {
+ "description": "Configuration for Tempo's external storage backend.",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "endpoint": {
+ "type": "string",
+ "description": "The external S3-compatible endpoint for Tempo's storage."
+ },
+ "insecure": {
+ "type": "boolean",
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
+ },
+ "secretAccessKey": {
+ "type": "string",
+ "description": "The secret access key (password) for the external S3-compatible bucket."
+ },
+ "accessKeyId": {
+ "type": "string",
+ "description": "The access key ID (username) for the external S3-compatible bucket."
+ },
+ "bucketName": {
+ "type": "string",
+ "description": "The bucket name of the external S3-compatible object storage."
+ }
+ }
},
- "iamUserNameOverride": {
- "$ref": "#/$defs/Types.AwsIamRoleName",
- "description": "Overrides the default IAM user name for the VPN"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "ssh",
- "vpnClientsSubnetCidr"
- ]
+ }
},
- "Spec.Infrastructure.Vpn.Ssh": {
+ "Spec.Distribution.Modules.Tracing.Minio": {
"type": "object",
+ "description": "Configuration for Tracing's MinIO deployment.",
"additionalProperties": false,
"properties": {
- "publicKeys": {
- "type": "array",
- "items": {
- "anyOf": [
- {
- "$ref": "#/$defs/Types.SshPubKey"
- },
- {
- "$ref": "#/$defs/Types.FileRef"
- }
- ]
- },
- "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented"
+ "storageSize": {
+ "type": "string",
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
- "githubUsersName": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "minItems": 1,
- "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user"
+ "rootUser": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "username": {
+ "type": "string",
+ "description": "The username for the default MinIO root user."
+ },
+ "password": {
+ "type": "string",
+ "description": "The password for the default MinIO root user."
+ }
+ }
},
- "allowedFromCidrs": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.Cidr"
- },
- "description": "The CIDR enabled in the security group that can access the bastions in SSH"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
- },
- "required": [
- "allowedFromCidrs",
- "githubUsersName"
- ]
+ }
},
- "Spec.Kubernetes": {
+ "Spec.Distribution.Modules.Networking": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Networking module.",
"properties": {
- "vpcId": {
- "$ref": "#/$defs/Types.AwsVpcId",
- "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
- },
- "clusterIAMRoleNamePrefixOverride": {
- "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
- "description": "Overrides the default IAM role name prefix for the EKS cluster"
- },
- "workersIAMRoleNamePrefixOverride": {
- "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
- "description": "Overrides the default IAM role name prefix for the EKS workers"
- },
- "subnetIds": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.AwsSubnetId"
- },
- "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
- },
- "apiServer": {
- "$ref": "#/$defs/Spec.Kubernetes.APIServer"
- },
- "serviceIpV4Cidr": {
- "$ref": "#/$defs/Types.Cidr",
- "description": "This value defines the CIDR that will be used to assign IP addresses to the services"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
},
- "nodeAllowedSshPublicKey": {
- "anyOf": [
- {
- "$ref": "#/$defs/Types.AwsSshPubKey"
- },
- {
- "$ref": "#/$defs/Types.FileRef"
- }
- ],
- "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user"
+ "tigeraOperator": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator"
},
- "nodePoolsLaunchKind": {
+ "type": {
"type": "string",
"enum": [
- "launch_configurations",
- "launch_templates",
- "both"
- ],
- "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
- },
- "logRetentionDays": {
- "type": "integer",
- "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days."
- },
- "logsTypes": {
- "type": "array",
- "items": {
- "type": "string",
- "enum": [
- "api",
- "audit",
- "authenticator",
- "controllerManager",
- "scheduler"
- ]
- },
- "minItems": 0,
- "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types."
- },
- "nodePools": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool"
- }
- },
- "awsAuth": {
- "$ref": "#/$defs/Spec.Kubernetes.AwsAuth"
+ "none"
+ ]
}
- },
- "required": [
- "apiServer",
- "nodeAllowedSshPublicKey",
- "nodePools",
- "nodePoolsLaunchKind"
- ]
+ }
+ },
+ "Spec.Distribution.Modules.Networking.TigeraOperator": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ }
+ }
},
- "Spec.Kubernetes.APIServer": {
+ "Spec.Distribution.Modules.Policy": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Policy module.",
"properties": {
- "privateAccess": {
- "type": "boolean",
- "description": "This value defines if the API server will be accessible only from the private subnets"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
},
- "privateAccessCidrs": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.Cidr"
- },
- "minItems": 0,
- "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets"
+ "type": {
+ "type": "string",
+ "enum": [
+ "none",
+ "gatekeeper",
+ "kyverno"
+ ],
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
},
- "publicAccessCidrs": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.Cidr"
- },
- "minItems": 0,
- "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets"
+ "gatekeeper": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper"
},
- "publicAccess": {
- "type": "boolean",
- "description": "This value defines if the API server will be accessible from the public subnets"
+ "kyverno": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno"
}
},
"required": [
- "privateAccess",
- "publicAccess"
- ]
- },
- "Spec.Kubernetes.AwsAuth": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "additionalAccounts": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap"
- },
- "users": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User"
+ "type"
+ ],
+ "allOf": [
+ {
+ "if": {
+ "properties": {
+ "type": {
+ "const": "gatekeeper"
+ }
+ }
},
- "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap"
+ "then": {
+ "required": [
+ "gatekeeper"
+ ]
+ }
},
- "roles": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role"
+ {
+ "if": {
+ "properties": {
+ "type": {
+ "const": "kyverno"
+ }
+ }
},
- "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap"
+ "then": {
+ "required": [
+ "kyverno"
+ ]
+ }
}
- }
+ ]
},
- "Spec.Kubernetes.AwsAuth.Role": {
+ "Spec.Distribution.Modules.Policy.Gatekeeper": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Gatekeeper package.",
"properties": {
- "username": {
- "type": "string"
- },
- "groups": {
+ "additionalExcludedNamespaces": {
"type": "array",
"items": {
"type": "string"
- }
+ },
+ "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them."
},
- "rolearn": {
- "$ref": "#/$defs/Types.AwsArn"
+ "enforcementAction": {
+ "type": "string",
+ "enum": [
+ "deny",
+ "dryrun",
+ "warn"
+ ],
+ "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations."
+ },
+ "installDefaultPolicies": {
+ "type": "boolean",
+ "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution."
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
},
"required": [
- "groups",
- "rolearn",
- "username"
+ "enforcementAction",
+ "installDefaultPolicies"
]
},
- "Spec.Kubernetes.AwsAuth.User": {
+ "Spec.Distribution.Modules.Policy.Kyverno": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Kyverno package.",
"properties": {
- "username": {
- "type": "string"
- },
- "groups": {
+ "additionalExcludedNamespaces": {
"type": "array",
"items": {
"type": "string"
- }
+ },
+ "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them."
},
- "userarn": {
- "$ref": "#/$defs/Types.AwsArn"
+ "validationFailureAction": {
+ "type": "string",
+ "enum": [
+ "Audit",
+ "Enforce"
+ ],
+ "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies."
+ },
+ "installDefaultPolicies": {
+ "type": "boolean",
+           "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
},
"required": [
- "groups",
- "userarn",
- "username"
+ "validationFailureAction",
+ "installDefaultPolicies"
]
},
- "Spec.Kubernetes.NodePool": {
+ "Spec.Distribution.Modules.Dr": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Disaster Recovery module.",
"properties": {
- "type": {
- "type": "string",
- "enum": [
- "eks-managed",
- "self-managed"
- ]
- },
- "name": {
- "type": "string",
- "description": "The name of the node pool"
- },
- "ami": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
},
- "containerRuntime": {
+ "type": {
"type": "string",
"enum": [
- "docker",
- "containerd"
+ "none",
+ "eks"
],
- "description": "The container runtime to use for the nodes"
- },
- "size": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size"
- },
- "instance": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance"
- },
- "attachedTargetGroups": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.AwsArn"
- },
- "description": "This optional array defines additional target groups to attach to the instances in the node pool"
- },
- "labels": {
- "$ref": "#/$defs/Types.KubeLabels",
- "description": "Kubernetes labels that will be added to the nodes"
- },
- "taints": {
- "$ref": "#/$defs/Types.KubeTaints",
- "description": "Kubernetes taints that will be added to the nodes"
- },
- "tags": {
- "$ref": "#/$defs/Types.AwsTags",
- "description": "AWS tags that will be added to the ASG and EC2 instances"
+ "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`."
},
- "subnetIds": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.AwsSubnetId"
- },
- "description": "This value defines the subnet IDs where the nodes will be created"
- },
- "additionalFirewallRules": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules"
+ "velero": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
}
},
"required": [
- "instance",
- "name",
- "size"
- ]
+ "type"
+ ],
+ "if": {
+ "properties": {
+ "type": {
+ "const": "eks"
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "type",
+ "velero"
+ ]
+ }
},
- "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": {
+ "Spec.Distribution.Modules.Dr.Velero": {
"type": "object",
"additionalProperties": false,
"properties": {
- "name": {
- "type": "string"
- },
- "type": {
- "type": "string",
- "enum": [
- "ingress",
- "egress"
- ]
- },
- "tags": {
- "$ref": "#/$defs/Types.AwsTags"
- },
- "cidrBlocks": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Types.Cidr"
- },
- "minItems": 1
- },
- "protocol": {
- "$ref": "#/$defs/Types.AwsIpProtocol"
+ "schedules": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's backup schedules.",
+ "properties": {
+ "install": {
+ "type": "boolean",
+ "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`."
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero schedules.",
+ "properties": {
+ "manifests": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's manifests backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ }
+ }
+ },
+ "full": {
+ "type": "object",
+ "additionalProperties": false,
+                     "description": "Configuration for Velero's full backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ },
+ "snapshotMoveData": {
+ "type": "boolean",
+                         "description": "EXPERIMENTAL (if you do more than one backup, the backups following the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
+ }
+ }
+ }
+ }
+ }
+ }
},
- "ports": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
- }
- },
- "required": [
- "cidrBlocks",
- "name",
- "ports",
- "protocol",
- "type"
- ]
- },
- "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "from": {
- "$ref": "#/$defs/Types.TcpPort"
+ "eks": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks"
},
- "to": {
- "$ref": "#/$defs/Types.TcpPort"
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
},
"required": [
- "from",
- "to"
+ "eks"
]
},
- "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": {
+ "Spec.Distribution.Modules.Dr.Velero.Eks": {
"type": "object",
"additionalProperties": false,
"properties": {
- "name": {
- "type": "string",
- "description": "The name of the FW rule"
- },
- "type": {
- "type": "string",
- "enum": [
- "ingress",
- "egress"
- ],
- "description": "The type of the FW rule can be ingress or egress"
- },
- "tags": {
- "$ref": "#/$defs/Types.AwsTags",
- "description": "The tags of the FW rule"
- },
- "self": {
- "type": "boolean",
- "description": "If true, the source will be the security group itself"
+ "region": {
+ "$ref": "#/$defs/Types.AwsRegion",
+ "description": "The region where the bucket for Velero will be located."
},
- "protocol": {
- "$ref": "#/$defs/Types.AwsIpProtocol",
- "description": "The protocol of the FW rule"
+ "bucketName": {
+ "$ref": "#/$defs/Types.AwsS3BucketName",
+ "maxLength": 49,
+ "description": "The name of the bucket for Velero."
},
- "ports": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
+ "iamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
}
},
"required": [
- "self",
- "name",
- "ports",
- "protocol",
- "type"
+ "iamRoleArn",
+ "region",
+ "bucketName"
]
},
- "Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": {
+ "Spec.Distribution.Modules.Auth": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Auth module.",
"properties": {
- "name": {
- "type": "string",
- "description": "The name of the FW rule"
- },
- "type": {
- "type": "string",
- "enum": [
- "ingress",
- "egress"
- ],
- "description": "The type of the FW rule can be ingress or egress"
+ "overrides": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides"
},
- "tags": {
- "$ref": "#/$defs/Types.AwsTags",
- "description": "The tags of the FW rule"
+ "provider": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider"
},
- "sourceSecurityGroupId": {
+ "baseDomain": {
"type": "string",
- "description": "The source security group ID"
+ "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class."
},
- "protocol": {
- "$ref": "#/$defs/Types.AwsIpProtocol",
- "description": "The protocol of the FW rule"
+ "pomerium": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium"
},
- "ports": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
+ "dex": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex"
}
},
"required": [
- "sourceSecurityGroupId",
- "name",
- "ports",
- "protocol",
- "type"
+ "provider"
+ ],
+ "allOf": [
+ {
+ "if": {
+ "properties": {
+ "provider": {
+ "properties": {
+ "type": {
+ "const": "sso"
+ }
+ }
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "dex",
+ "pomerium",
+ "baseDomain"
+ ]
+ },
+ "else": {
+ "properties": {
+ "dex": {
+ "type": "null"
+ },
+ "pomerium": {
+ "type": "null"
+ }
+ }
+ }
+ },
+ {
+ "if": {
+ "properties": {
+ "provider": {
+ "properties": {
+ "type": {
+ "const": "basicAuth"
+ }
+ }
+ }
+ }
+ },
+ "then": {
+ "properties": {
+ "provider": {
+ "required": [
+ "basicAuth"
+ ]
+ }
+ }
+ },
+ "else": {
+ "properties": {
+ "provider": {
+ "basicAuth": {
+ "type": "null"
+ }
+ }
+ }
+ }
+ }
]
},
- "Spec.Kubernetes.NodePool.AdditionalFirewallRules": {
+ "Spec.Distribution.Modules.Auth.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Auth module.",
"properties": {
- "cidrBlocks": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock"
- },
- "minItems": 1,
- "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored."
+ "nodeSelector": {
+ "$ref": "#/$defs/Types.KubeNodeSelector",
+ "description": "Set to override the node selector used to place the pods of the Auth module."
},
- "sourceSecurityGroupId": {
- "type": "array",
+ "tolerations": {
+ "type": [
+ "array",
+ "null"
+ ],
"items": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId"
+ "$ref": "#/$defs/Types.KubeToleration"
},
- "minItems": 1
+ "description": "Set to override the tolerations that will be added to the pods of the Auth module."
},
- "self": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self"
- },
- "minItems": 1
+ "ingresses": {
+ "type": "object",
+ "description": "Override the definition of the Auth module ingresses.",
+ "additionalProperties": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress"
+ }
}
}
},
- "Spec.Kubernetes.NodePool.Ami": {
+ "Spec.Distribution.Modules.Auth.Overrides.Ingress": {
"type": "object",
"additionalProperties": false,
"properties": {
- "id": {
+ "host": {
"type": "string",
- "description": "The AMI ID to use for the nodes"
+ "description": "Use this host for the ingress instead of the default one."
},
- "owner": {
+ "ingressClass": {
"type": "string",
- "description": "The owner of the AMI"
+ "description": "Use this ingress class for the ingress instead of the default one."
}
},
"required": [
- "id",
- "owner"
+ "host",
+ "ingressClass"
]
},
- "Spec.Kubernetes.NodePool.Instance": {
+ "Spec.Distribution.Modules.Auth.Provider": {
"type": "object",
"additionalProperties": false,
"properties": {
"type": {
- "type": "string",
- "description": "The instance type to use for the nodes"
- },
- "spot": {
- "type": "boolean",
- "description": "If true, the nodes will be created as spot instances"
- },
- "volumeSize": {
- "type": "integer",
- "description": "The size of the disk in GB"
- },
- "volumeType": {
"type": "string",
"enum": [
- "gp2",
- "gp3",
- "io1",
- "standard"
- ]
- },
- "maxPods": {
- "type": "integer"
- }
- },
- "required": [
- "type"
- ]
- },
- "Spec.Kubernetes.NodePool.Size": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "min": {
- "type": "integer",
- "minimum": 0,
- "description": "The minimum number of nodes in the node pool"
+ "none",
+ "basicAuth",
+ "sso"
+ ],
+ "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`."
},
- "max": {
- "type": "integer",
- "minimum": 0,
- "description": "The maximum number of nodes in the node pool"
- }
- },
- "required": [
- "max",
- "min"
- ]
- },
- "Spec.ToolsConfiguration": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "terraform": {
- "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform"
+ "basicAuth": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth"
}
},
"required": [
- "terraform"
+ "type"
]
},
- "Spec.ToolsConfiguration.Terraform": {
+ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the HTTP Basic Auth provider.",
"properties": {
- "state": {
- "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State"
+ "username": {
+ "type": "string",
+ "description": "The username for logging in with the HTTP basic authentication."
+ },
+ "password": {
+ "type": "string",
+ "description": "The password for logging in with the HTTP basic authentication."
}
},
"required": [
- "state"
+ "username",
+ "password"
]
},
- "Spec.ToolsConfiguration.Terraform.State": {
+ "Spec.Distribution.Modules.Auth.Pomerium": {
+ "$ref": "../public/spec-distribution-modules-auth-pomerium.json"
+ },
+ "Spec.Distribution.Modules.Auth.Dex": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Dex package.",
"properties": {
- "s3": {
- "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3"
+ "connectors": {
+ "type": "array",
+ "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/"
+ },
+ "additionalStaticClients": {
+ "type": "array",
+           "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n  - id: my-custom-client\n    name: \"A custom additional static client\"\n    redirectURIs:\n      - \"https://myapp.tld/redirect\"\n      - \"https://alias.tld/oidc-callback\"\n    secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/"
+ },
+ "expiry": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "signingKeys": {
+ "type": "string",
+ "description": "Dex signing key expiration time duration (default 6h)."
+ },
+ "idTokens": {
+ "type": "string",
+ "description": "Dex ID tokens expiration time duration (default 24h)."
+ }
+ }
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
}
},
"required": [
- "s3"
+ "connectors"
]
},
- "Spec.ToolsConfiguration.Terraform.State.S3": {
+ "Spec.Distribution.Modules.Aws": {
"type": "object",
"additionalProperties": false,
"properties": {
- "bucketName": {
- "$ref": "#/$defs/Types.AwsS3BucketName",
- "description": "This value defines which bucket will be used to store all the states"
+ "clusterAutoscaler": {
+ "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler"
},
- "keyPrefix": {
- "$ref": "#/$defs/Types.AwsS3KeyPrefix",
- "description": "This value defines which folder will be used to store all the states inside the bucket"
+ "ebsCsiDriver": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "iamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
+ }
+ },
+ "required": [
+ "iamRoleArn"
+ ]
},
- "region": {
- "$ref": "#/$defs/Types.AwsRegion",
- "description": "This value defines in which region the bucket is located"
+ "loadBalancerController": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "iamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
+ }
+ },
+ "required": [
+ "iamRoleArn"
+ ]
},
- "skipRegionValidation": {
- "type": "boolean",
- "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region"
+ "ebsSnapshotController": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ }
+ }
+ },
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
}
},
"required": [
- "bucketName",
- "keyPrefix",
- "region"
+ "clusterAutoscaler",
+ "ebsCsiDriver",
+ "loadBalancerController",
+ "overrides"
]
},
- "Types.AwsArn": {
+ "Types.SemVer": {
"type": "string",
- "pattern": "^arn:(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P(?P[^:\\/\\n]*)[:\\/])?(?P.*)$"
+ "pattern": "^v?(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)(?:-(?P(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"
},
- "Types.AwsIamRoleName": {
+ "Types.IpAddress": {
"type": "string",
- "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$"
+       "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$"
},
- "Types.AwsIamRoleNamePrefix": {
+ "Types.Cidr": {
"type": "string",
- "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$"
+ "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$"
},
- "Types.AwsIpProtocol": {
+ "Types.FileRef": {
"type": "string",
- "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$",
- "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user friendly"
+ "pattern": "^\\{file\\:\\/\\/.+\\}$"
+ },
+ "Types.EnvRef": {
+ "type": "string",
+       "pattern": "^\\{env\\:\\/\\/.*\\}$"
+ },
+ "Types.TcpPort": {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65535
+ },
+ "Types.SshPubKey": {
+ "type": "string",
+ "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+"
+ },
+ "Types.Uri": {
+ "type": "string",
+ "pattern": "^(http|https)\\:\\/\\/.+$"
+ },
+ "Types.AwsArn": {
+ "type": "string",
+ "pattern": "^arn:(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P(?P[^:\\/\\n]*)[:\\/])?(?P.*)$"
},
"Types.AwsRegion": {
"type": "string",
@@ -2579,6 +2784,37 @@
"us-west-2"
]
},
+ "Types.AwsVpcId": {
+ "type": "string",
+ "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$"
+ },
+ "Types.AwsSshPubKey": {
+ "type": "string",
+ "pattern": "^ssh\\-(ed25519|rsa)\\s+"
+ },
+ "Types.AwsSubnetId": {
+ "type": "string",
+ "pattern": "^subnet\\-[0-9a-f]{17}$"
+ },
+ "Types.AwsTags": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "Types.AwsIpProtocol": {
+ "type": "string",
+ "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$",
+ "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user friendly"
+ },
+ "Types.AwsIamRoleNamePrefix": {
+ "type": "string",
+ "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$"
+ },
+ "Types.AwsIamRoleName": {
+ "type": "string",
+ "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$"
+ },
"Types.AwsS3BucketName": {
"type": "string",
"allOf": [
@@ -2586,67 +2822,139 @@
"pattern": "^[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]$"
},
{
- "not": {
- "pattern": "^xn--|-s3alias$"
- }
+ "not": {
+ "pattern": "^xn--|-s3alias$"
+ }
+ }
+ ]
+ },
+ "Types.AwsS3BucketNamePrefix": {
+ "type": "string",
+ "allOf": [
+ {
+ "pattern": "^[a-z0-9][a-z0-9-.]{1,35}[a-z0-9-.]$"
+ },
+ {
+ "not": {
+ "pattern": "^xn--|-s3alias$"
+ }
+ }
+ ]
+ },
+ "Types.AwsS3KeyPrefix": {
+ "type": "string",
+ "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$",
+ "maxLength": 960
+ },
+ "Types.KubeLabels": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "Types.KubeTaints": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$"
+ }
+ },
+ "Types.KubeNodeSelector": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "Types.KubeToleration": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "effect": {
+ "type": "string",
+ "enum": [
+ "NoSchedule",
+ "PreferNoSchedule",
+ "NoExecute"
+ ]
+ },
+ "operator": {
+ "type": "string",
+ "enum": [
+ "Exists",
+ "Equal"
+ ]
+ },
+ "key": {
+ "type": "string",
+ "description": "The key of the toleration"
+ },
+ "value": {
+ "type": "string",
+ "description": "The value of the toleration"
+ }
+ },
+ "required": [
+ "effect",
+ "key"
+ ],
+ "anyOf": [
+ {
+ "required": [
+ "operator"
+ ]
+ },
+ {
+ "required": [
+ "value"
+ ]
}
]
},
- "Types.AwsS3BucketNamePrefix": {
- "type": "string",
- "allOf": [
- {
- "pattern": "^[a-z0-9][a-z0-9-.]{1,35}[a-z0-9-.]$"
+ "Types.KubeResources": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "requests": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "cpu": {
+ "type": "string",
+ "description": "The CPU request for the Pod, in cores. Example: `500m`."
+ },
+ "memory": {
+ "type": "string",
+ "description": "The memory request for the Pod. Example: `500M`."
+ }
+ }
},
- {
- "not": {
- "pattern": "^xn--|-s3alias$"
+ "limits": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "cpu": {
+ "type": "string",
+ "description": "The CPU limit for the Pod. Example: `1000m`."
+ },
+ "memory": {
+ "type": "string",
+ "description": "The memory limit for the Pod. Example: `1G`."
+ }
}
}
- ]
- },
- "Types.AwsS3KeyPrefix": {
- "type": "string",
- "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$",
- "maxLength": 960
- },
- "Types.AwsSshPubKey": {
- "type": "string",
- "pattern": "^ssh\\-(ed25519|rsa)\\s+"
- },
- "Types.AwsSubnetId": {
- "type": "string",
- "pattern": "^subnet\\-[0-9a-f]{17}$"
- },
- "Types.AwsTags": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
}
},
- "Types.AwsVpcId": {
- "type": "string",
- "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$"
- },
- "Types.Cidr": {
- "type": "string",
- "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$"
- },
- "Types.EnvRef": {
- "type": "string",
- "pattern": "\\{^env\\:\\/\\/.*\\}$"
- },
- "Types.FileRef": {
- "type": "string",
- "pattern": "^\\{file\\:\\/\\/.+\\}$"
- },
- "Types.FuryModuleComponentOverrides": {
+ "Types.FuryModuleOverrides": {
"type": "object",
+ "description": "Override the common configuration with a particular configuration for the module.",
"additionalProperties": false,
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the minio module"
+ "description": "Set to override the node selector used to place the pods of the module."
},
"tolerations": {
"type": [
@@ -2656,17 +2964,23 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the cert-manager module"
+ "description": "Set to override the tolerations that will be added to the pods of the module."
+ },
+ "ingresses": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/$defs/Types.FuryModuleOverridesIngress"
+ }
}
}
},
- "Types.FuryModuleComponentOverridesWithIAMRoleName": {
+ "Types.FuryModuleComponentOverrides": {
"type": "object",
"additionalProperties": false,
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the load balancer controller module"
+ "description": "Set to override the node selector used to place the pods of the package."
},
"tolerations": {
"type": [
@@ -2676,20 +2990,17 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the cluster autoscaler module"
- },
- "iamRoleName": {
- "$ref": "#/$defs/Types.AwsIamRoleName"
+ "description": "Set to override the tolerations that will be added to the pods of the package."
}
}
},
- "Types.FuryModuleOverrides": {
+ "Types.FuryModuleComponentOverridesWithIAMRoleName": {
"type": "object",
"additionalProperties": false,
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the dr module"
+ "description": "The node selector to use to place the pods for the load balancer controller module."
},
"tolerations": {
"type": [
@@ -2699,13 +3010,10 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the monitoring module"
+ "description": "The tolerations that will be added to the pods for the cluster autoscaler module."
},
- "ingresses": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/$defs/Types.FuryModuleOverridesIngress"
- }
+ "iamRoleName": {
+ "$ref": "#/$defs/Types.AwsIamRoleName"
}
}
},
@@ -2715,167 +3023,68 @@
"properties": {
"disableAuth": {
"type": "boolean",
- "description": "If true, the ingress will not have authentication"
+ "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth."
},
"host": {
"type": "string",
- "description": "The host of the ingress"
+ "description": "Use this host for the ingress instead of the default one."
},
"ingressClass": {
"type": "string",
- "description": "The ingress class of the ingress"
+ "description": "Use this ingress class for the ingress instead of the default one."
}
}
},
- "Types.IpAddress": {
- "type": "string",
- "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\b){4}$"
- },
- "Types.KubeLabels": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "Types.KubeNodeSelector": {
- "type": [
- "object",
- "null"
- ],
- "additionalProperties": {
- "type": "string"
- }
- },
- "Types.KubeResources": {
+ "Spec.Distribution.Modules.Aws.ClusterAutoscaler": {
"type": "object",
"additionalProperties": false,
"properties": {
- "requests": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "cpu": {
- "type": "string",
- "description": "The cpu request for the prometheus pods"
- },
- "memory": {
- "type": "string",
- "description": "The memory request for the opensearch pods"
- }
- }
+ "iamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
},
- "limits": {
- "type": "object",
- "additionalProperties": false,
- "properties": {
- "cpu": {
- "type": "string",
- "description": "The cpu limit for the opensearch pods"
- },
- "memory": {
- "type": "string",
- "description": "The memory limit for the opensearch pods"
- }
- }
+ "overrides": {
+ "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName"
}
- }
- },
- "Types.KubeTaints": {
- "type": "array",
- "items": {
- "type": "string",
- "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$"
- }
+ },
+ "required": [
+ "iamRoleArn"
+ ]
},
- "Types.KubeToleration": {
+ "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": {
"type": "object",
"additionalProperties": false,
"properties": {
- "effect": {
- "type": "string",
- "enum": [
- "NoSchedule",
- "PreferNoSchedule",
- "NoExecute"
- ]
- },
- "operator": {
- "type": "string",
- "enum": [
- "Exists",
- "Equal"
- ]
+ "iamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
},
- "key": {
- "type": "string",
- "description": "The key of the toleration"
+ "region": {
+ "$ref": "#/$defs/Types.AwsRegion"
},
- "value": {
- "type": "string",
- "description": "The value of the toleration"
+ "hostedZoneId": {
+ "type": "string"
}
},
"required": [
- "effect",
- "key"
- ],
- "anyOf": [
- {
- "required": [
- "operator"
- ]
- },
- {
- "required": [
- "value"
- ]
- }
+ "hostedZoneId",
+ "iamRoleArn",
+ "region"
]
},
- "Types.SemVer": {
- "type": "string",
- "pattern": "^v?(?P<major>0|[1-9]\\d*)\\.(?P<minor>0|[1-9]\\d*)\\.(?P<patch>0|[1-9]\\d*)(?:-(?P<prerelease>(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"
- },
- "Types.SshPubKey": {
- "type": "string",
- "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+"
- },
- "Types.TcpPort": {
- "type": "integer",
- "minimum": 0,
- "maximum": 65535
- },
- "Types.Uri": {
- "type": "string",
- "pattern": "^(http|https)\\:\\/\\/.+$"
- }
- },
- "$schema": "http://json-schema.org/draft-07/schema#",
- "additionalProperties": false,
- "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service",
- "properties": {
- "apiVersion": {
- "type": "string",
- "pattern": "^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$"
- },
- "kind": {
- "type": "string",
- "enum": [
- "EKSCluster"
+ "Spec.Distribution.Modules.Ingress.ExternalDNS": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "privateIamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
+ },
+ "publicIamRoleArn": {
+ "$ref": "#/$defs/Types.AwsArn"
+ }
+ },
+ "required": [
+ "privateIamRoleArn",
+ "publicIamRoleArn"
]
- },
- "metadata": {
- "$ref": "#/$defs/Metadata"
- },
- "spec": {
- "$ref": "#/$defs/Spec"
}
- },
- "required": [
- "apiVersion",
- "kind",
- "metadata",
- "spec"
- ],
- "type": "object"
+ }
}
diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json
index 00008a0da..d37497bc4 100644
--- a/schemas/public/ekscluster-kfd-v1alpha2.json
+++ b/schemas/public/ekscluster-kfd-v1alpha2.json
@@ -1,6 +1,6 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
- "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service",
+ "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).",
"type": "object",
"properties": {
"apiVersion": {
@@ -34,6 +34,7 @@
"properties": {
"name": {
"type": "string",
+ "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.",
"minLength": 1,
"maxLength": 56
}
@@ -48,17 +49,20 @@
"properties": {
"distributionVersion": {
"type": "string",
+ "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.",
"minLength": 1
},
"region": {
- "$ref": "#/$defs/Types.AwsRegion"
+ "$ref": "#/$defs/Types.AwsRegion",
+ "description": "Defines in which AWS region the cluster and all the related resources will be created."
},
"tags": {
"$ref": "#/$defs/Types.AwsTags",
"description": "This map defines which will be the common tags that will be added to all the resources created on AWS."
},
"toolsConfiguration": {
- "$ref": "#/$defs/Spec.ToolsConfiguration"
+ "$ref": "#/$defs/Spec.ToolsConfiguration",
+ "description": "Configuration for tools used by furyctl, like Terraform."
},
"infrastructure": {
"$ref": "#/$defs/Spec.Infrastructure"
@@ -155,6 +159,7 @@
"Spec.ToolsConfiguration.Terraform.State": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for storing the Terraform state of the cluster.",
"properties": {
"s3": {
"$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3"
@@ -167,22 +172,23 @@
"Spec.ToolsConfiguration.Terraform.State.S3": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the S3 bucket used to store the Terraform state.",
"properties": {
"bucketName": {
"$ref": "#/$defs/Types.AwsS3BucketName",
- "description": "This value defines which bucket will be used to store all the states"
+ "description": "This value defines which bucket will be used to store all the states."
},
"keyPrefix": {
"$ref": "#/$defs/Types.AwsS3KeyPrefix",
- "description": "This value defines which folder will be used to store all the states inside the bucket"
+ "description": "This value defines which folder will be used to store all the states inside the bucket."
},
"region": {
"$ref": "#/$defs/Types.AwsRegion",
- "description": "This value defines in which region the bucket is located"
+ "description": "This value defines in which region the bucket is located."
},
"skipRegionValidation": {
"type": "boolean",
- "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region"
+ "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region."
}
},
"required": [
@@ -196,12 +202,10 @@
"additionalProperties": false,
"properties": {
"vpc": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpc",
- "description": "This key defines the VPC that will be created in AWS"
+ "$ref": "#/$defs/Spec.Infrastructure.Vpc"
},
"vpn": {
- "$ref": "#/$defs/Spec.Infrastructure.Vpn",
- "description": "This section defines the creation of VPN bastions"
+ "$ref": "#/$defs/Spec.Infrastructure.Vpn"
}
},
"allOf": [
@@ -279,6 +283,7 @@
},
"Spec.Infrastructure.Vpc": {
"type": "object",
+ "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.",
"additionalProperties": false,
"properties": {
"network": {
@@ -295,7 +300,7 @@
"properties": {
"cidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "This is the CIDR of the VPC that will be created"
+ "description": "The network CIDR for the VPC that will be created"
},
"subnetsCidrs": {
"$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs"
@@ -308,6 +313,7 @@
},
"Spec.Infrastructure.Vpc.Network.SubnetsCidrs": {
"type": "object",
+ "description": "Network CIDRS configuration for private and public subnets.",
"additionalProperties": false,
"properties": {
"private": {
@@ -315,14 +321,14 @@
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
+ "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created"
},
"public": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
+ "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created"
}
},
"required": [
@@ -332,50 +338,51 @@
},
"Spec.Infrastructure.Vpn": {
"type": "object",
+ "description": "Configuration for the VPN server instances.",
"additionalProperties": false,
"properties": {
"instances": {
"type": "integer",
- "description": "The number of instances to create, 0 to skip the creation"
+ "description": "The number of VPN server instances to create, `0` to skip the creation."
},
"port": {
"$ref": "#/$defs/Types.TcpPort",
- "description": "The port used by the OpenVPN server"
+ "description": "The port where each OpenVPN server will listen for connections."
},
"instanceType": {
"type": "string",
- "description": "The size of the AWS EC2 instance"
+ "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`."
},
"diskSize": {
"type": "integer",
- "description": "The size of the disk in GB"
+ "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB."
},
"operatorName": {
"type": "string",
- "description": "The username of the account to create in the bastion's operating system"
+ "description": "The username of the account to create in the bastion's operating system."
},
"dhParamsBits": {
"type": "integer",
- "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file"
+ "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file."
},
"vpnClientsSubnetCidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected"
+ "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected."
},
"ssh": {
"$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh"
},
"vpcId": {
"$ref": "#/$defs/Types.AwsVpcId",
- "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted"
+ "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted."
},
"bucketNamePrefix": {
"$ref": "#/$defs/Types.AwsS3BucketNamePrefix",
- "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states"
+ "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)."
},
"iamUserNameOverride": {
"$ref": "#/$defs/Types.AwsIamRoleName",
- "description": "Overrides the default IAM user name for the VPN"
+ "description": "Overrides IAM user name for the VPN. Default is to use the cluster name."
}
},
"required": [
@@ -399,7 +406,7 @@
}
]
},
- "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented"
+ "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system."
},
"githubUsersName": {
"type": "array",
@@ -407,14 +414,14 @@
"type": "string"
},
"minItems": 1,
- "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user"
+ "description": "List of GitHub usernames from whom to get their SSH public key and add it as an authorized key for the `operatorName` user."
},
"allowedFromCidrs": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.Cidr"
},
- "description": "The CIDR enabled in the security group that can access the bastions in SSH"
+ "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source."
}
},
"required": [
@@ -424,33 +431,34 @@
},
"Spec.Kubernetes": {
"type": "object",
+ "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.",
"additionalProperties": false,
"properties": {
"vpcId": {
"$ref": "#/$defs/Types.AwsVpcId",
- "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+ "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created."
},
"clusterIAMRoleNamePrefixOverride": {
"$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
- "description": "Overrides the default IAM role name prefix for the EKS cluster"
+ "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name."
},
"workersIAMRoleNamePrefixOverride": {
"$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
- "description": "Overrides the default IAM role name prefix for the EKS workers"
+ "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name."
},
"subnetIds": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.AwsSubnetId"
},
- "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+ "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created."
},
"apiServer": {
"$ref": "#/$defs/Spec.Kubernetes.APIServer"
},
"serviceIpV4Cidr": {
"$ref": "#/$defs/Types.Cidr",
- "description": "This value defines the CIDR that will be used to assign IP addresses to the services"
+ "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services."
},
"nodeAllowedSshPublicKey": {
"anyOf": [
@@ -461,7 +469,7 @@
"$ref": "#/$defs/Types.FileRef"
}
],
- "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user"
+ "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file."
},
"nodePoolsLaunchKind": {
"type": "string",
@@ -470,11 +478,44 @@
"launch_templates",
"both"
],
- "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
+ "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
+ },
+ "nodePoolGlobalAmiType": {
+ "type": "string",
+ "enum": [
+ "alinux2",
+ "alinux2023"
+ ],
+ "description": "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool."
},
"logRetentionDays": {
"type": "integer",
- "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days."
+ "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.",
+ "enum": [
+ 0,
+ 1,
+ 3,
+ 5,
+ 7,
+ 14,
+ 30,
+ 60,
+ 90,
+ 120,
+ 150,
+ 180,
+ 365,
+ 400,
+ 545,
+ 731,
+ 1096,
+ 1827,
+ 2192,
+ 2557,
+ 2922,
+ 3288,
+ 3653
+ ]
},
"logsTypes": {
"type": "array",
@@ -505,7 +546,8 @@
"apiServer",
"nodeAllowedSshPublicKey",
"nodePools",
- "nodePoolsLaunchKind"
+ "nodePoolsLaunchKind",
+ "nodePoolGlobalAmiType"
]
},
"Spec.Kubernetes.APIServer": {
@@ -514,7 +556,7 @@
"properties": {
"privateAccess": {
"type": "boolean",
- "description": "This value defines if the API server will be accessible only from the private subnets"
+ "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`."
},
"privateAccessCidrs": {
"type": "array",
@@ -522,7 +564,7 @@
"$ref": "#/$defs/Types.Cidr"
},
"minItems": 0,
- "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets"
+ "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server."
},
"publicAccessCidrs": {
"type": "array",
@@ -530,11 +572,11 @@
"$ref": "#/$defs/Types.Cidr"
},
"minItems": 0,
- "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets"
+ "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server."
},
"publicAccess": {
"type": "boolean",
- "description": "This value defines if the API server will be accessible from the public subnets"
+ "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`."
}
},
"required": [
@@ -545,8 +587,10 @@
"Spec.Kubernetes.NodePool": {
"type": "object",
"additionalProperties": false,
+ "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.",
"properties": {
"type": {
+ "description": "The type of Node Pool, can be `self-managed` for using customizations like custom AMIs or setting the max pods per node, or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.",
"type": "string",
"enum": [
"eks-managed",
@@ -555,7 +599,7 @@
},
"name": {
"type": "string",
- "description": "The name of the node pool"
+ "description": "The name of the node pool."
},
"ami": {
"$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami"
@@ -566,7 +610,7 @@
"docker",
"containerd"
],
- "description": "The container runtime to use for the nodes"
+ "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`."
},
"size": {
"$ref": "#/$defs/Spec.Kubernetes.NodePool.Size"
@@ -579,26 +623,26 @@
"items": {
"$ref": "#/$defs/Types.AwsArn"
},
- "description": "This optional array defines additional target groups to attach to the instances in the node pool"
+ "description": "This optional array defines additional target groups to attach to the instances in the node pool."
},
"labels": {
"$ref": "#/$defs/Types.KubeLabels",
- "description": "Kubernetes labels that will be added to the nodes"
+ "description": "Kubernetes labels that will be added to the nodes."
},
"taints": {
"$ref": "#/$defs/Types.KubeTaints",
- "description": "Kubernetes taints that will be added to the nodes"
+ "description": "Kubernetes taints that will be added to the nodes."
},
"tags": {
"$ref": "#/$defs/Types.AwsTags",
- "description": "AWS tags that will be added to the ASG and EC2 instances"
+ "description": "AWS tags that will be added to the ASG and EC2 instances."
},
"subnetIds": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.AwsSubnetId"
},
- "description": "This value defines the subnet IDs where the nodes will be created"
+ "description": "Optional list of subnet IDs where to create the nodes."
},
"additionalFirewallRules": {
"$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules"
@@ -607,45 +651,124 @@
"required": [
"instance",
"name",
- "size"
- ]
+ "size",
+ "type"
+ ],
+ "if": {
+ "allOf": [
+ {
+ "properties": {
+ "type": {
+ "enum": [
+ "eks-managed"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ "then": {
+ "properties": {
+ "ami": {
+ "properties": {
+ "id": {
+ "type": "null"
+ },
+ "owner": {
+ "type": "null"
+ }
+ }
+ }
+ }
+ }
},
"Spec.Kubernetes.NodePool.Ami": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for customizing the Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifying the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.",
"properties": {
"id": {
"type": "string",
- "description": "The AMI ID to use for the nodes"
+ "description": "The ID of the AMI to use for the nodes, must be set toghether with the `owner` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`."
},
"owner": {
"type": "string",
- "description": "The owner of the AMI"
+ "description": "The owner of the AMI to use for the nodes, must be set toghether with the `id` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`."
+ },
+ "type": {
+ "type": "string",
+ "description": "The AMI type defines the AMI to use for `eks-managed` and `self-managed` types of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time as `ami.id` and `ami.owner`.",
+ "enum": [
+ "alinux2",
+ "alinux2023"
+ ]
}
},
- "required": [
- "id",
- "owner"
+ "oneOf": [
+ {
+ "allOf": [
+ {
+ "required": [
+ "id",
+ "owner"
+ ]
+ },
+ {
+ "not": {
+ "required": [
+ "type"
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "allOf": [
+ {
+ "required": [
+ "type"
+ ]
+ },
+ {
+ "not": {
+ "anyOf": [
+ {
+ "required": [
+ "id"
+ ]
+ },
+ {
+ "required": [
+ "owner"
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
]
},
"Spec.Kubernetes.NodePool.Instance": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the instances that will be used in the node pool.",
"properties": {
"type": {
"type": "string",
- "description": "The instance type to use for the nodes"
+ "description": "The instance type to use for the nodes."
},
"spot": {
"type": "boolean",
- "description": "If true, the nodes will be created as spot instances"
+ "description": "If `true`, the nodes will be created as spot instances. Default is `false`."
},
"volumeSize": {
"type": "integer",
- "description": "The size of the disk in GB"
+ "description": "The size of the disk in GB."
},
"volumeType": {
"type": "string",
+ "description": "Volume type for the instance disk. Default is `gp2`.",
"enum": [
"gp2",
"gp3",
@@ -654,7 +777,8 @@
]
},
"maxPods": {
- "type": "integer"
+ "type": "integer",
+ "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt"
}
},
"required": [
@@ -668,12 +792,12 @@
"min": {
"type": "integer",
"minimum": 0,
- "description": "The minimum number of nodes in the node pool"
+ "description": "The minimum number of nodes in the node pool."
},
"max": {
"type": "integer",
"minimum": 0,
- "description": "The maximum number of nodes in the node pool"
+ "description": "The maximum number of nodes in the node pool."
}
},
"required": [
@@ -684,6 +808,7 @@
"Spec.Kubernetes.NodePool.AdditionalFirewallRules": {
"type": "object",
"additionalProperties": false,
+ "description": "Optional additional firewall rules that will be attached to the nodes.",
"properties": {
"cidrBlocks": {
"type": "array",
@@ -691,7 +816,8 @@
"$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock"
},
"minItems": 1,
- "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored."
+ "maxItems": 1,
+ "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details."
},
"sourceSecurityGroupId": {
"type": "array",
@@ -718,13 +844,15 @@
},
"type": {
"type": "string",
+ "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.",
"enum": [
"ingress",
"egress"
]
},
"tags": {
- "$ref": "#/$defs/Types.AwsTags"
+ "$ref": "#/$defs/Types.AwsTags",
+ "description": "Additional AWS tags for the Firewall rule."
},
"cidrBlocks": {
"type": "array",
@@ -754,7 +882,7 @@
"properties": {
"name": {
"type": "string",
- "description": "The name of the FW rule"
+ "description": "The name for the additional Firewall rule Security Group."
},
"type": {
"type": "string",
@@ -762,19 +890,19 @@
"ingress",
"egress"
],
- "description": "The type of the FW rule can be ingress or egress"
+ "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic."
},
"tags": {
"$ref": "#/$defs/Types.AwsTags",
- "description": "The tags of the FW rule"
+ "description": "Additional AWS tags for the Firewall rule."
},
"sourceSecurityGroupId": {
"type": "string",
- "description": "The source security group ID"
+ "description": "The source security group ID."
},
"protocol": {
"$ref": "#/$defs/Types.AwsIpProtocol",
- "description": "The protocol of the FW rule"
+ "description": "The protocol of the Firewall rule."
},
"ports": {
"$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
@@ -794,7 +922,7 @@
"properties": {
"name": {
"type": "string",
- "description": "The name of the FW rule"
+ "description": "The name of the Firewall rule."
},
"type": {
"type": "string",
@@ -802,19 +930,19 @@
"ingress",
"egress"
],
- "description": "The type of the FW rule can be ingress or egress"
+ "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic."
},
"tags": {
"$ref": "#/$defs/Types.AwsTags",
- "description": "The tags of the FW rule"
+ "description": "Additional AWS tags for the Firewall rule."
},
"self": {
"type": "boolean",
- "description": "If true, the source will be the security group itself"
+ "description": "If `true`, the source will be the security group itself."
},
"protocol": {
"$ref": "#/$defs/Types.AwsIpProtocol",
- "description": "The protocol of the FW rule"
+ "description": "The protocol of the Firewall rule."
},
"ports": {
"$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports"
@@ -830,6 +958,7 @@
},
"Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": {
"type": "object",
+ "description": "Port range for the Firewall Rule.",
"additionalProperties": false,
"properties": {
"from": {
@@ -846,6 +975,7 @@
},
"Spec.Kubernetes.AwsAuth": {
"type": "object",
+ "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html",
"additionalProperties": false,
"properties": {
"additionalAccounts": {
@@ -853,21 +983,21 @@
"items": {
"type": "string"
},
- "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap"
+ "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap."
},
"users": {
"type": "array",
"items": {
"$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User"
},
- "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap"
+ "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap."
},
"roles": {
"type": "array",
"items": {
"$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role"
},
- "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap"
+ "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap."
}
}
},
@@ -1004,28 +1134,29 @@
"Spec.Distribution.Common": {
"type": "object",
"additionalProperties": false,
+ "description": "Common configuration for all the distribution modules.",
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for all the KFD modules"
+ "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`."
},
"tolerations": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for all the KFD modules"
+ "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```"
},
"provider": {
"$ref": "#/$defs/Spec.Distribution.Common.Provider"
},
"relativeVendorPath": {
"type": "string",
- "description": "The relative path to the vendor directory, does not need to be changed"
+ "description": "The relative path to the vendor directory, does not need to be changed."
},
"registry": {
"type": "string",
- "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too."
+ "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too."
}
}
},
@@ -1035,7 +1166,7 @@
"properties": {
"type": {
"type": "string",
- "description": "The type of the provider, must be EKS if specified"
+ "description": "The provider type. Don't set. FOR INTERNAL USE ONLY."
}
},
"required": [
@@ -1090,14 +1221,15 @@
},
"baseDomain": {
"type": "string",
- "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone"
+ "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone."
},
"nginx": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx",
- "description": "Configurations for the nginx ingress controller module"
+ "description": "Configurations for the Ingress nginx controller package."
},
"certManager": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager"
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager",
+ "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses."
},
"dns": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS"
@@ -1189,20 +1321,21 @@
"Spec.Distribution.Modules.Ingress.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Ingress module.",
"properties": {
"ingresses": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
},
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the ingress module"
+ "description": "Set to override the node selector used to place the pods of the Ingress module."
},
"tolerations": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the ingress module"
+ "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
}
}
},
@@ -1235,7 +1368,7 @@
"single",
"dual"
],
- "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***"
+ "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
},
"tls": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -1259,7 +1392,7 @@
"secret",
"none"
],
- "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***"
+ "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`."
},
"secret": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret"
@@ -1284,16 +1417,19 @@
"Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": {
"type": "object",
"additionalProperties": false,
+ "description": "Kubernetes TLS secret for the ingresses TLS certificate.",
"properties": {
"cert": {
"type": "string",
- "description": "The certificate file content or you can use the file notation to get the content from a file"
+ "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"key": {
- "type": "string"
+ "type": "string",
+ "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"ca": {
- "type": "string"
+ "type": "string",
+ "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
}
},
"required": [
@@ -1305,6 +1441,7 @@
"Spec.Distribution.Modules.Ingress.CertManager": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.",
"properties": {
"clusterIssuer": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer"
@@ -1320,15 +1457,16 @@
"Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.",
"properties": {
"name": {
"type": "string",
- "description": "The name of the cluster issuer"
+ "description": "The name of the clusterIssuer."
},
"email": {
"type": "string",
"format": "email",
- "description": "The email of the cluster issuer"
+ "description": "The email address to use during the certificate issuing process."
},
"type": {
"type": "string",
@@ -1336,11 +1474,11 @@
"dns01",
"http01"
],
- "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***"
+ "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge."
},
"solvers": {
"type": "array",
- "description": "The custom solvers configurations"
+ "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field."
}
},
"required": [
@@ -1362,6 +1500,7 @@
},
"Spec.Distribution.Modules.Ingress.DNS": {
"type": "object",
+ "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.",
"additionalProperties": false,
"properties": {
"public": {
@@ -1381,11 +1520,11 @@
"properties": {
"name": {
"type": "string",
- "description": "The name of the public hosted zone"
+ "description": "The name of the public hosted zone."
},
"create": {
"type": "boolean",
- "description": "If true, the public hosted zone will be created"
+ "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead."
}
},
"required": [
@@ -1395,15 +1534,16 @@
},
"Spec.Distribution.Modules.Ingress.DNS.Private": {
"type": "object",
+ "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.",
"additionalProperties": false,
"properties": {
"name": {
"type": "string",
- "description": "The name of the private hosted zone"
+ "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`."
},
"create": {
"type": "boolean",
- "description": "If true, the private hosted zone will be created"
+ "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead."
}
},
"required": [
@@ -1414,6 +1554,7 @@
"Spec.Distribution.Modules.Logging": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1426,7 +1567,7 @@
"loki",
"customOutputs"
],
- "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+ "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
},
"opensearch": {
"$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -1465,6 +1606,20 @@
]
}
},
+ {
+ "if": {
+ "properties": {
+ "type": {
+ "const": "loki"
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "loki"
+ ]
+ }
+ },
{
"if": {
"properties": {
@@ -1491,14 +1646,14 @@
"single",
"triple"
],
- "description": "The type of the opensearch, must be ***single*** or ***triple***"
+ "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment."
},
"resources": {
"$ref": "#/$defs/Types.KubeResources"
},
"storageSize": {
"type": "string",
- "description": "The storage size for the opensearch pods"
+ "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1510,6 +1665,7 @@
},
"Spec.Distribution.Modules.Logging.Cerebro": {
"type": "object",
+ "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.",
"additionalProperties": false,
"properties": {
"overrides": {
@@ -1520,10 +1676,11 @@
"Spec.Distribution.Modules.Logging.Minio": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Logging's MinIO deployment.",
"properties": {
"storageSize": {
"type": "string",
- "description": "The PVC size for each minio disk, 6 disks total"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
"rootUser": {
"type": "object",
@@ -1531,11 +1688,11 @@
"properties": {
"username": {
"type": "string",
- "description": "The username of the minio root user"
+ "description": "The username for the default MinIO root user."
},
"password": {
"type": "string",
- "description": "The password of the minio root user"
+ "description": "The password for the default MinIO root user."
}
}
},
@@ -1546,10 +1703,12 @@
},
"Spec.Distribution.Modules.Logging.Loki": {
"type": "object",
+ "description": "Configuration for the Loki package.",
"additionalProperties": false,
"properties": {
"backend": {
"type": "string",
+ "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.",
"enum": [
"minio",
"externalEndpoint"
@@ -1558,37 +1717,47 @@
"externalEndpoint": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Loki's external storage backend.",
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the loki external endpoint"
+ "description": "External S3-compatible endpoint for Loki's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the loki external endpoint will be insecure"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the loki external endpoint"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the loki external endpoint"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the loki external endpoint"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
+ "tsdbStartDate": {
+ "type": "string",
+ "format": "date",
+ "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`."
+ },
"resources": {
"$ref": "#/$defs/Types.KubeResources"
}
- }
+ },
+ "required": [
+ "tsdbStartDate"
+ ]
},
"Spec.Distribution.Modules.Logging.Operator": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging Operator.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1596,41 +1765,41 @@
}
},
"Spec.Distribution.Modules.Logging.CustomOutputs": {
- "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.",
+ "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.",
"type": "object",
"additionalProperties": false,
"properties": {
"audit": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"events": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"infra": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"ingressNginx": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"kubernetes": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"systemdCommon": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"systemdEtcd": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"errors": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
}
},
"required": [
@@ -1647,7 +1816,7 @@
"Spec.Distribution.Modules.Monitoring": {
"type": "object",
"additionalProperties": false,
- "description": "configuration for the Monitoring module components",
+ "description": "Configuration for the Monitoring module.",
"properties": {
"type": {
"type": "string",
@@ -1657,7 +1826,7 @@
"prometheusAgent",
"mimir"
],
- "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1703,15 +1872,15 @@
},
"retentionTime": {
"type": "string",
- "description": "The retention time for the k8s Prometheus instance."
+ "description": "The retention time for the `k8s` Prometheus instance."
},
"retentionSize": {
"type": "string",
- "description": "The retention size for the k8s Prometheus instance."
+ "description": "The retention size for the `k8s` Prometheus instance."
},
"storageSize": {
"type": "string",
- "description": "The storage size for the k8s Prometheus instance."
+ "description": "The storage size for the `k8s` Prometheus instance."
},
"remoteWrite": {
"description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).",
@@ -1744,15 +1913,15 @@
"properties": {
"deadManSwitchWebhookUrl": {
"type": "string",
- "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io"
+ "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
},
"installDefaultRules": {
"type": "boolean",
- "description": "If true, the default rules will be installed"
+ "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution."
},
"slackWebhookUrl": {
"type": "string",
- "description": "The slack webhook url to send alerts"
+ "description": "The Slack webhook URL where to send the infrastructural and workload alerts to."
}
}
},
@@ -1803,10 +1972,11 @@
"Spec.Distribution.Modules.Monitoring.Mimir": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Mimir package.",
"properties": {
"retentionTime": {
"type": "string",
- "description": "The retention time for the mimir pods"
+ "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
},
"backend": {
"type": "string",
@@ -1814,31 +1984,32 @@
"minio",
"externalEndpoint"
],
- "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***"
+ "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
},
"externalEndpoint": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Mimir's external storage backend.",
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the external mimir backend"
+ "description": "The external S3-compatible endpoint for Mimir's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the external mimir backend will not use tls"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the external mimir backend"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the external mimir backend"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the external mimir backend"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
@@ -1849,11 +2020,12 @@
},
"Spec.Distribution.Modules.Monitoring.Minio": {
"type": "object",
+ "description": "Configuration for Monitoring's MinIO deployment.",
"additionalProperties": false,
"properties": {
"storageSize": {
"type": "string",
- "description": "The storage size for the minio pods"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
"rootUser": {
"type": "object",
@@ -1861,11 +2033,11 @@
"properties": {
"username": {
"type": "string",
- "description": "The username for the minio root user"
+ "description": "The username for the default MinIO root user."
},
"password": {
"type": "string",
- "description": "The password for the minio root user"
+ "description": "The password for the default MinIO root user."
}
}
},
@@ -1877,6 +2049,7 @@
"Spec.Distribution.Modules.Tracing": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Tracing module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1887,7 +2060,7 @@
"none",
"tempo"
],
- "description": "The type of tracing to use, either ***none*** or ***tempo***"
+ "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`."
},
"tempo": {
"$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo"
@@ -1903,10 +2076,11 @@
"Spec.Distribution.Modules.Tracing.Tempo": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Tempo package.",
"properties": {
"retentionTime": {
"type": "string",
- "description": "The retention time for the tempo pods"
+ "description": "The retention time for the traces stored in Tempo."
},
"backend": {
"type": "string",
@@ -1914,31 +2088,32 @@
"minio",
"externalEndpoint"
],
- "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***"
+ "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
},
"externalEndpoint": {
+ "description": "Configuration for Tempo's external storage backend.",
"type": "object",
"additionalProperties": false,
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the external tempo backend"
+ "description": "The external S3-compatible endpoint for Tempo's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the external tempo backend will not use tls"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the external tempo backend"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the external tempo backend"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the external tempo backend"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
@@ -1949,11 +2124,12 @@
},
"Spec.Distribution.Modules.Tracing.Minio": {
"type": "object",
+ "description": "Configuration for Tracing's MinIO deployment.",
"additionalProperties": false,
"properties": {
"storageSize": {
"type": "string",
- "description": "The storage size for the minio pods"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
"rootUser": {
"type": "object",
@@ -1961,11 +2137,11 @@
"properties": {
"username": {
"type": "string",
- "description": "The username for the minio root user"
+ "description": "The username for the default MinIO root user."
},
"password": {
"type": "string",
- "description": "The password for the minio root user"
+ "description": "The password for the default MinIO root user."
}
}
},
@@ -1977,9 +2153,10 @@
"Spec.Distribution.Modules.Networking": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Networking module.",
"properties": {
"overrides": {
- "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
+ "$ref": "#/$defs/Types.FuryModuleOverrides"
},
"tigeraOperator": {
"$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator"
@@ -1998,6 +2175,7 @@
"Spec.Distribution.Modules.Policy": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Policy module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -2009,7 +2187,7 @@
"gatekeeper",
"kyverno"
],
- "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***"
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
},
"gatekeeper": {
"$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper"
@@ -2055,6 +2233,7 @@
"Spec.Distribution.Modules.Policy.Gatekeeper": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Gatekeeper package.",
"properties": {
"additionalExcludedNamespaces": {
"type": "array",
@@ -2070,11 +2249,11 @@
"dryrun",
"warn"
],
- "description": "The enforcement action to use for the gatekeeper module"
+ "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations."
},
"installDefaultPolicies": {
"type": "boolean",
- "description": "If true, the default policies will be installed"
+ "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -2088,13 +2267,14 @@
"Spec.Distribution.Modules.Policy.Kyverno": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Kyverno package.",
"properties": {
"additionalExcludedNamespaces": {
"type": "array",
"items": {
"type": "string"
},
- "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them."
+ "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them."
},
"validationFailureAction": {
"type": "string",
@@ -2102,11 +2282,11 @@
"Audit",
"Enforce"
],
- "description": "The validation failure action to use for the kyverno module"
+ "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies."
},
"installDefaultPolicies": {
"type": "boolean",
- "description": "If true, the default policies will be installed"
+ "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -2120,6 +2300,7 @@
"Spec.Distribution.Modules.Dr": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Disaster Recovery module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -2130,7 +2311,7 @@
"none",
"eks"
],
- "description": "The type of the DR, must be ***none*** or ***eks***"
+ "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`."
},
"velero": {
"$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
@@ -2166,24 +2347,46 @@
"type": "boolean",
"description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`."
},
- "cron": {
+ "definitions": {
"type": "object",
"additionalProperties": false,
- "description": "Configuration for Velero's schedules cron.",
+ "description": "Configuration for Velero schedules.",
"properties": {
"manifests": {
- "type": "string",
- "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's manifests backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ }
+ }
},
"full": {
- "type": "string",
- "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's full backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ },
+ "snapshotMoveData": {
+ "type": "boolean",
+ "description": "EXPERIMENTAL (if you do more than one backup, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
+ }
+ }
}
}
- },
- "ttl": {
- "type": "string",
- "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
}
}
},
@@ -2204,12 +2407,12 @@
"properties": {
"region": {
"$ref": "#/$defs/Types.AwsRegion",
- "description": "The region where the velero bucket is located"
+ "description": "The region where the bucket for Velero will be located."
},
"bucketName": {
"$ref": "#/$defs/Types.AwsS3BucketName",
"maxLength": 49,
- "description": "The name of the velero bucket"
+ "description": "The name of the bucket for Velero."
}
},
"required": [
@@ -2220,6 +2423,7 @@
"Spec.Distribution.Modules.Auth": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Auth module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides"
@@ -2229,7 +2433,7 @@
},
"baseDomain": {
"type": "string",
- "description": "The base domain for the auth module"
+ "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class."
},
"pomerium": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium"
@@ -2308,10 +2512,11 @@
"Spec.Distribution.Modules.Auth.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Auth module.",
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the auth module"
+ "description": "Set to override the node selector used to place the pods of the Auth module."
},
"tolerations": {
"type": [
@@ -2321,10 +2526,11 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the auth module"
+ "description": "Set to override the tolerations that will be added to the pods of the Auth module."
},
"ingresses": {
"type": "object",
+ "description": "Override the definition of the Auth module ingresses.",
"additionalProperties": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress"
}
@@ -2337,11 +2543,11 @@
"properties": {
"host": {
"type": "string",
- "description": "The host of the ingress"
+ "description": "Use this host for the ingress instead of the default one."
},
"ingressClass": {
"type": "string",
- "description": "The ingress class of the ingress"
+ "description": "Use this ingress class for the ingress instead of the default one."
}
},
"required": [
@@ -2360,7 +2566,7 @@
"basicAuth",
"sso"
],
- "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***"
+ "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`."
},
"basicAuth": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth"
@@ -2373,14 +2579,15 @@
"Spec.Distribution.Modules.Auth.Provider.BasicAuth": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the HTTP Basic Auth provider.",
"properties": {
"username": {
"type": "string",
- "description": "The username for the basic auth"
+ "description": "The username for logging in with the HTTP basic authentication."
},
"password": {
"type": "string",
- "description": "The password for the basic auth"
+ "description": "The password for logging in with the HTTP basic authentication."
}
},
"required": [
@@ -2394,14 +2601,15 @@
"Spec.Distribution.Modules.Auth.Dex": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Dex package.",
"properties": {
"connectors": {
"type": "array",
- "description": "The connectors for dex"
+ "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/"
},
"additionalStaticClients": {
"type": "array",
- "description": "The additional static clients for dex"
+ "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/"
},
"expiry": {
"type": "object",
@@ -2680,11 +2888,11 @@
"properties": {
"cpu": {
"type": "string",
- "description": "The cpu request for the prometheus pods"
+ "description": "The CPU request for the Pod, in cores. Example: `500m`."
},
"memory": {
"type": "string",
- "description": "The memory request for the opensearch pods"
+ "description": "The memory request for the Pod. Example: `500M`."
}
}
},
@@ -2694,11 +2902,11 @@
"properties": {
"cpu": {
"type": "string",
- "description": "The cpu limit for the opensearch pods"
+ "description": "The CPU limit for the Pod. Example: `1000m`."
},
"memory": {
"type": "string",
- "description": "The memory limit for the opensearch pods"
+ "description": "The memory limit for the Pod. Example: `1G`."
}
}
}
@@ -2706,11 +2914,12 @@
},
"Types.FuryModuleOverrides": {
"type": "object",
+ "description": "Override the common configuration with a particular configuration for the module.",
"additionalProperties": false,
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the dr module"
+ "description": "Set to override the node selector used to place the pods of the module."
},
"tolerations": {
"type": [
@@ -2720,7 +2929,7 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the monitoring module"
+ "description": "Set to override the tolerations that will be added to the pods of the module."
},
"ingresses": {
"type": "object",
@@ -2736,7 +2945,7 @@
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the minio module"
+ "description": "Set to override the node selector used to place the pods of the package."
},
"tolerations": {
"type": [
@@ -2746,7 +2955,7 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the cert-manager module"
+ "description": "Set to override the tolerations that will be added to the pods of the package."
}
}
},
@@ -2756,7 +2965,7 @@
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the load balancer controller module"
+ "description": "The node selector to use to place the pods for the load balancer controller module."
},
"tolerations": {
"type": [
@@ -2766,7 +2975,7 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the cluster autoscaler module"
+ "description": "The tolerations that will be added to the pods for the cluster autoscaler module."
},
"iamRoleName": {
"$ref": "#/$defs/Types.AwsIamRoleName"
@@ -2779,15 +2988,15 @@
"properties": {
"disableAuth": {
"type": "boolean",
- "description": "If true, the ingress will not have authentication"
+ "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth."
},
"host": {
"type": "string",
- "description": "The host of the ingress"
+ "description": "Use this host for the ingress instead of the default one."
},
"ingressClass": {
"type": "string",
- "description": "The ingress class of the ingress"
+ "description": "Use this ingress class for the ingress instead of the default one."
}
}
}
diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json
index 3e4451b36..c2d0302b7 100644
--- a/schemas/public/kfddistribution-kfd-v1alpha2.json
+++ b/schemas/public/kfddistribution-kfd-v1alpha2.json
@@ -1,6 +1,6 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
- "description": "",
+ "description": "KFD modules deployed on top of an existing Kubernetes cluster.",
"type": "object",
"properties": {
"apiVersion": {
@@ -34,6 +34,7 @@
"properties": {
"name": {
"type": "string",
+ "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.",
"minLength": 1,
"maxLength": 56
}
@@ -48,6 +49,7 @@
"properties": {
"distributionVersion": {
"type": "string",
+ "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.",
"minLength": 1
},
"distribution": {
@@ -68,7 +70,7 @@
"properties": {
"kubeconfig": {
"type": "string",
- "description": "The kubeconfig file path"
+ "description": "The path to the kubeconfig file."
},
"common": {
"$ref": "#/$defs/Spec.Distribution.Common"
@@ -134,28 +136,29 @@
"Spec.Distribution.Common": {
"type": "object",
"additionalProperties": false,
+ "description": "Common configuration for all the distribution modules.",
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for all the KFD modules"
+ "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`."
},
"tolerations": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for all the KFD modules"
+ "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```"
},
"provider": {
"$ref": "#/$defs/Spec.Distribution.Common.Provider"
},
"relativeVendorPath": {
"type": "string",
- "description": "The relative path to the vendor directory, does not need to be changed"
+ "description": "The relative path to the vendor directory, does not need to be changed."
},
"registry": {
"type": "string",
- "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too."
+ "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too."
}
}
},
@@ -165,7 +168,7 @@
"properties": {
"type": {
"type": "string",
- "description": "The type of the provider"
+ "description": "The provider type. Don't set. FOR INTERNAL USE ONLY."
}
},
"required": [
@@ -217,14 +220,15 @@
},
"baseDomain": {
"type": "string",
- "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone"
+ "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class."
},
"nginx": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx",
- "description": "Configurations for the nginx ingress controller module"
+ "description": "Configurations for the Ingress nginx controller package."
},
"certManager": {
- "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager"
+ "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager",
+ "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses."
},
"forecastle": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle"
@@ -258,20 +262,21 @@
"Spec.Distribution.Modules.Ingress.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Ingress module.",
"properties": {
"ingresses": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses"
},
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the ingress module"
+ "description": "Set to override the node selector used to place the pods of the Ingress module."
},
"tolerations": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the ingress module"
+ "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
}
}
},
@@ -304,7 +309,7 @@
"single",
"dual"
],
- "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***"
+ "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
},
"tls": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -328,7 +333,7 @@
"secret",
"none"
],
- "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***"
+ "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`."
},
"secret": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret"
@@ -353,16 +358,19 @@
"Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": {
"type": "object",
"additionalProperties": false,
+ "description": "Kubernetes TLS secret for the ingresses TLS certificate.",
"properties": {
"cert": {
"type": "string",
- "description": "The certificate file content or you can use the file notation to get the content from a file"
+ "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"key": {
- "type": "string"
+ "type": "string",
+ "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file."
},
"ca": {
- "type": "string"
+ "type": "string",
+ "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file."
}
},
"required": [
@@ -374,6 +382,7 @@
"Spec.Distribution.Modules.Ingress.CertManager": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.",
"properties": {
"clusterIssuer": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer"
@@ -389,26 +398,27 @@
"Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.",
"properties": {
"name": {
"type": "string",
- "description": "The name of the cluster issuer"
+ "description": "The name of the clusterIssuer."
},
"email": {
"type": "string",
"format": "email",
- "description": "The email of the cluster issuer"
+ "description": "The email address to use during the certificate issuing process."
},
"type": {
"type": "string",
"enum": [
"http01"
],
- "description": "The type of the cluster issuer, must be ***http01***"
+ "description": "The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations."
},
"solvers": {
"type": "array",
- "description": "The custom solvers configurations"
+ "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field."
}
},
"required": [
@@ -431,6 +441,7 @@
"Spec.Distribution.Modules.Logging": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -443,7 +454,7 @@
"loki",
"customOutputs"
],
- "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+ "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
},
"opensearch": {
"$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -482,6 +493,20 @@
]
}
},
+ {
+ "if": {
+ "properties": {
+ "type": {
+ "const": "loki"
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "loki"
+ ]
+ }
+ },
{
"if": {
"properties": {
@@ -508,14 +533,14 @@
"single",
"triple"
],
- "description": "The type of the opensearch, must be ***single*** or ***triple***"
+ "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment."
},
"resources": {
"$ref": "#/$defs/Types.KubeResources"
},
"storageSize": {
"type": "string",
- "description": "The storage size for the opensearch pods"
+ "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -527,6 +552,7 @@
},
"Spec.Distribution.Modules.Logging.Cerebro": {
"type": "object",
+ "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.",
"additionalProperties": false,
"properties": {
"overrides": {
@@ -537,10 +563,11 @@
"Spec.Distribution.Modules.Logging.Minio": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Logging's MinIO deployment.",
"properties": {
"storageSize": {
"type": "string",
- "description": "The PVC size for each minio disk, 6 disks total"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
"rootUser": {
"type": "object",
@@ -548,11 +575,11 @@
"properties": {
"username": {
"type": "string",
- "description": "The username of the minio root user"
+ "description": "The username for the default MinIO root user."
},
"password": {
"type": "string",
- "description": "The password of the minio root user"
+ "description": "The password for the default MinIO root user."
}
}
},
@@ -563,10 +590,12 @@
},
"Spec.Distribution.Modules.Logging.Loki": {
"type": "object",
+ "description": "Configuration for the Loki package.",
"additionalProperties": false,
"properties": {
"backend": {
"type": "string",
+ "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.",
"enum": [
"minio",
"externalEndpoint"
@@ -575,37 +604,47 @@
"externalEndpoint": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Loki's external storage backend.",
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the loki external endpoint"
+ "description": "External S3-compatible endpoint for Loki's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the loki external endpoint will be insecure"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the loki external endpoint"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the loki external endpoint"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the loki external endpoint"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
+ "tsdbStartDate": {
+ "type": "string",
+ "format": "date",
+ "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`."
+ },
"resources": {
"$ref": "#/$defs/Types.KubeResources"
}
- }
+ },
+ "required": [
+ "tsdbStartDate"
+ ]
},
"Spec.Distribution.Modules.Logging.Operator": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Logging Operator.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -613,41 +652,41 @@
}
},
"Spec.Distribution.Modules.Logging.CustomOutputs": {
- "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.",
+ "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.",
"type": "object",
"additionalProperties": false,
"properties": {
"audit": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"events": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"infra": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"ingressNginx": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"kubernetes": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"systemdCommon": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"systemdEtcd": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
},
"errors": {
"type": "string",
- "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow."
+ "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`"
}
},
"required": [
@@ -664,7 +703,7 @@
"Spec.Distribution.Modules.Monitoring": {
"type": "object",
"additionalProperties": false,
- "description": "configuration for the Monitoring module components",
+ "description": "Configuration for the Monitoring module.",
"properties": {
"type": {
"type": "string",
@@ -674,7 +713,7 @@
"prometheusAgent",
"mimir"
],
- "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -720,15 +759,15 @@
},
"retentionTime": {
"type": "string",
- "description": "The retention time for the K8s Prometheus instance."
+ "description": "The retention time for the `k8s` Prometheus instance."
},
"retentionSize": {
"type": "string",
- "description": "The retention size for the k8s Prometheus instance."
+ "description": "The retention size for the `k8s` Prometheus instance."
},
"storageSize": {
"type": "string",
- "description": "The storage size for the k8s Prometheus instance."
+ "description": "The storage size for the `k8s` Prometheus instance."
},
"remoteWrite": {
"description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).",
@@ -761,15 +800,15 @@
"properties": {
"deadManSwitchWebhookUrl": {
"type": "string",
- "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io"
+ "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
},
"installDefaultRules": {
"type": "boolean",
- "description": "If true, the default rules will be installed"
+ "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution."
},
"slackWebhookUrl": {
"type": "string",
- "description": "The slack webhook url to send alerts"
+ "description": "The Slack webhook URL where to send the infrastructural and workload alerts to."
}
}
},
@@ -820,10 +859,11 @@
"Spec.Distribution.Modules.Monitoring.Mimir": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Mimir package.",
"properties": {
"retentionTime": {
"type": "string",
- "description": "The retention time for the mimir pods"
+ "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
},
"backend": {
"type": "string",
@@ -831,31 +871,32 @@
"minio",
"externalEndpoint"
],
- "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***"
+ "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
},
"externalEndpoint": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for Mimir's external storage backend.",
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the external mimir backend"
+ "description": "The external S3-compatible endpoint for Mimir's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the external mimir backend will not use tls"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the external mimir backend"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the external mimir backend"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the external mimir backend"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
@@ -866,11 +907,12 @@
},
"Spec.Distribution.Modules.Monitoring.Minio": {
"type": "object",
+ "description": "Configuration for Monitoring's MinIO deployment.",
"additionalProperties": false,
"properties": {
"storageSize": {
"type": "string",
- "description": "The storage size for the minio pods"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
"rootUser": {
"type": "object",
@@ -878,11 +920,11 @@
"properties": {
"username": {
"type": "string",
- "description": "The username for the minio root user"
+ "description": "The username for the default MinIO root user."
},
"password": {
"type": "string",
- "description": "The password for the minio root user"
+ "description": "The password for the default MinIO root user."
}
}
},
@@ -894,6 +936,7 @@
"Spec.Distribution.Modules.Tracing": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Tracing module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -904,7 +947,7 @@
"none",
"tempo"
],
- "description": "The type of tracing to use, either ***none*** or ***tempo***"
+ "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`."
},
"tempo": {
"$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo"
@@ -920,10 +963,11 @@
"Spec.Distribution.Modules.Tracing.Tempo": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Tempo package.",
"properties": {
"retentionTime": {
"type": "string",
- "description": "The retention time for the tempo pods"
+ "description": "The retention time for the traces stored in Tempo."
},
"backend": {
"type": "string",
@@ -931,31 +975,32 @@
"minio",
"externalEndpoint"
],
- "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***"
+ "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
},
"externalEndpoint": {
+ "description": "Configuration for Tempo's external storage backend.",
"type": "object",
"additionalProperties": false,
"properties": {
"endpoint": {
"type": "string",
- "description": "The endpoint of the external tempo backend"
+ "description": "The external S3-compatible endpoint for Tempo's storage."
},
"insecure": {
"type": "boolean",
- "description": "If true, the external tempo backend will not use tls"
+ "description": "If true, will use HTTP as protocol instead of HTTPS."
},
"secretAccessKey": {
"type": "string",
- "description": "The secret access key of the external tempo backend"
+ "description": "The secret access key (password) for the external S3-compatible bucket."
},
"accessKeyId": {
"type": "string",
- "description": "The access key id of the external tempo backend"
+ "description": "The access key ID (username) for the external S3-compatible bucket."
},
"bucketName": {
"type": "string",
- "description": "The bucket name of the external tempo backend"
+ "description": "The bucket name of the external S3-compatible object storage."
}
}
},
@@ -966,11 +1011,12 @@
},
"Spec.Distribution.Modules.Tracing.Minio": {
"type": "object",
+ "description": "Configuration for Tracing's MinIO deployment.",
"additionalProperties": false,
"properties": {
"storageSize": {
"type": "string",
- "description": "The storage size for the minio pods"
+ "description": "The PVC size for each MinIO disk, 6 disks total."
},
"rootUser": {
"type": "object",
@@ -978,11 +1024,11 @@
"properties": {
"username": {
"type": "string",
- "description": "The username for the minio root user"
+ "description": "The username for the default MinIO root user."
},
"password": {
"type": "string",
- "description": "The password for the minio root user"
+ "description": "The password for the default MinIO root user."
}
}
},
@@ -994,6 +1040,7 @@
"Spec.Distribution.Modules.Networking": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Networking module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1011,7 +1058,7 @@
"calico",
"cilium"
],
- "description": "The type of networking to use, either ***none***, ***calico*** or ***cilium***"
+ "description": "The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`."
}
},
"required": [
@@ -1048,10 +1095,12 @@
"additionalProperties": false,
"properties": {
"podCidr": {
- "$ref": "#/$defs/Types.Cidr"
+ "$ref": "#/$defs/Types.Cidr",
+ "description": "Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`."
},
"maskSize": {
- "type": "string"
+ "type": "string",
+ "description": "The mask size to use for the Pods network on each node."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1065,6 +1114,7 @@
"Spec.Distribution.Modules.Policy": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Policy module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1076,7 +1126,7 @@
"gatekeeper",
"kyverno"
],
- "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***"
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
},
"gatekeeper": {
"$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper"
@@ -1122,6 +1172,7 @@
"Spec.Distribution.Modules.Policy.Gatekeeper": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Gatekeeper package.",
"properties": {
"additionalExcludedNamespaces": {
"type": "array",
@@ -1137,11 +1188,11 @@
"dryrun",
"warn"
],
- "description": "The enforcement action to use for the gatekeeper module"
+ "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations."
},
"installDefaultPolicies": {
"type": "boolean",
- "description": "If true, the default policies will be installed"
+ "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1155,13 +1206,14 @@
"Spec.Distribution.Modules.Policy.Kyverno": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Kyverno package.",
"properties": {
"additionalExcludedNamespaces": {
"type": "array",
"items": {
"type": "string"
},
- "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them."
+ "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them."
},
"validationFailureAction": {
"type": "string",
@@ -1169,11 +1221,11 @@
"Audit",
"Enforce"
],
- "description": "The validation failure action to use for the kyverno module"
+ "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies."
},
"installDefaultPolicies": {
"type": "boolean",
- "description": "If true, the default policies will be installed"
+ "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1187,6 +1239,7 @@
"Spec.Distribution.Modules.Dr": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Disaster Recovery module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1197,7 +1250,7 @@
"none",
"on-premises"
],
- "description": "The type of the DR, must be ***none*** or ***on-premises***"
+ "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`."
},
"velero": {
"$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
@@ -1223,6 +1276,7 @@
"Spec.Distribution.Modules.Dr.Velero": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Velero package.",
"properties": {
"backend": {
"type": "string",
@@ -1268,24 +1322,57 @@
"type": "boolean",
"description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`."
},
- "cron": {
+ "definitions": {
"type": "object",
"additionalProperties": false,
- "description": "Configuration for Velero's schedules cron.",
+ "description": "Configuration for Velero schedules.",
"properties": {
"manifests": {
- "type": "string",
- "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's manifests backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ }
+ }
},
"full": {
- "type": "string",
- "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's full backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ },
+ "snapshotMoveData": {
+ "type": "boolean",
+ "description": "EXPERIMENTAL (if you do more than one backup, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
+ }
+ }
}
}
- },
- "ttl": {
- "type": "string",
- "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ }
+ }
+ },
+ "snapshotController": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for the additional snapshotController component installation.",
+ "properties": {
+ "install": {
+ "type": "boolean",
+ "description": "Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in."
}
}
},
@@ -1297,6 +1384,7 @@
"Spec.Distribution.Modules.Auth": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Auth module.",
"properties": {
"overrides": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides"
@@ -1306,7 +1394,7 @@
},
"baseDomain": {
"type": "string",
- "description": "The base domain for the auth module"
+ "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class."
},
"pomerium": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium"
@@ -1385,10 +1473,11 @@
"Spec.Distribution.Modules.Auth.Overrides": {
"type": "object",
"additionalProperties": false,
+ "description": "Override the common configuration with a particular configuration for the Auth module.",
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the auth module"
+ "description": "Set to override the node selector used to place the pods of the Auth module."
},
"tolerations": {
"type": [
@@ -1398,10 +1487,11 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the auth module"
+ "description": "Set to override the tolerations that will be added to the pods of the Auth module."
},
"ingresses": {
"type": "object",
+ "description": "Override the definition of the Auth module ingresses.",
"additionalProperties": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress"
}
@@ -1414,11 +1504,11 @@
"properties": {
"host": {
"type": "string",
- "description": "The host of the ingress"
+ "description": "Use this host for the ingress instead of the default one."
},
"ingressClass": {
"type": "string",
- "description": "The ingress class of the ingress"
+ "description": "Use this ingress class for the ingress instead of the default one."
}
},
"required": [
@@ -1437,7 +1527,7 @@
"basicAuth",
"sso"
],
- "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***"
+ "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`."
},
"basicAuth": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth"
@@ -1450,14 +1540,15 @@
"Spec.Distribution.Modules.Auth.Provider.BasicAuth": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the HTTP Basic Auth provider.",
"properties": {
"username": {
"type": "string",
- "description": "The username for the basic auth"
+ "description": "The username for logging in with the HTTP basic authentication."
},
"password": {
"type": "string",
- "description": "The password for the basic auth"
+ "description": "The password for logging in with the HTTP basic authentication."
}
},
"required": [
@@ -1471,14 +1562,15 @@
"Spec.Distribution.Modules.Auth.Dex": {
"type": "object",
"additionalProperties": false,
+ "description": "Configuration for the Dex package.",
"properties": {
"connectors": {
"type": "array",
- "description": "The connectors for dex"
+ "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/"
},
"additionalStaticClients": {
"type": "array",
- "description": "The additional static clients for dex"
+ "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n  - id: my-custom-client\n    name: \"A custom additional static client\"\n    redirectURIs:\n      - \"https://myapp.tld/redirect\"\n      - \"https://alias.tld/oidc-callback\"\n    secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/"
},
"expiry": {
"type": "object",
@@ -1542,11 +1634,29 @@
}
},
"Types.KubeTaints": {
- "type": "array",
- "items": {
- "type": "string",
- "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=(\\w+):(NoSchedule|PreferNoSchedule|NoExecute)$"
- }
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "effect": {
+ "type": "string",
+ "enum": [
+ "NoSchedule",
+ "PreferNoSchedule",
+ "NoExecute"
+ ]
+ },
+ "key": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "effect",
+ "key",
+ "value"
+ ]
},
"Types.KubeNodeSelector": {
"type": [
@@ -1612,11 +1722,11 @@
"properties": {
"cpu": {
"type": "string",
- "description": "The cpu request for the prometheus pods"
+ "description": "The CPU request for the Pod, in cores. Example: `500m`."
},
"memory": {
"type": "string",
- "description": "The memory request for the opensearch pods"
+ "description": "The memory request for the Pod. Example: `500M`."
}
}
},
@@ -1626,11 +1736,11 @@
"properties": {
"cpu": {
"type": "string",
- "description": "The cpu limit for the loki pods"
+ "description": "The CPU limit for the Pod. Example: `1000m`."
},
"memory": {
"type": "string",
- "description": "The memory limit for the opensearch pods"
+ "description": "The memory limit for the Pod. Example: `1G`."
}
}
}
@@ -1638,11 +1748,12 @@
},
"Types.FuryModuleOverrides": {
"type": "object",
+ "description": "Override the common configuration with a particular configuration for the module.",
"additionalProperties": false,
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the security module"
+ "description": "Set to override the node selector used to place the pods of the module."
},
"tolerations": {
"type": [
@@ -1652,7 +1763,7 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the monitoring module"
+ "description": "Set to override the tolerations that will be added to the pods of the module."
},
"ingresses": {
"type": "object",
@@ -1668,7 +1779,7 @@
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for the minio module"
+ "description": "Set to override the node selector used to place the pods of the package."
},
"tolerations": {
"type": [
@@ -1678,7 +1789,7 @@
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "The tolerations that will be added to the pods for the cert-manager module"
+ "description": "Set to override the tolerations that will be added to the pods of the package."
}
}
},
@@ -1688,15 +1799,15 @@
"properties": {
"disableAuth": {
"type": "boolean",
- "description": "If true, the ingress will not have authentication"
+ "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth."
},
"host": {
"type": "string",
- "description": "The host of the ingress"
+ "description": "Use this host for the ingress instead of the default one."
},
"ingressClass": {
"type": "string",
- "description": "The ingress class of the ingress"
+ "description": "Use this ingress class for the ingress instead of the default one."
}
}
}
diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json
index cc808f71e..e49d59cf0 100644
--- a/schemas/public/onpremises-kfd-v1alpha2.json
+++ b/schemas/public/onpremises-kfd-v1alpha2.json
@@ -1,6 +1,6 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
- "description": "",
+ "description": "A KFD Cluster deployed on top of a set of existing VMs.",
"type": "object",
"properties": {
"apiVersion": {
@@ -49,7 +49,7 @@
"properties": {
"distributionVersion": {
"type": "string",
- "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1.",
+ "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.",
"minLength": 1
},
"kubernetes": {
@@ -708,7 +708,7 @@
"properties": {
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`"
+ "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`."
},
"tolerations": {
"type": "array",
@@ -726,7 +726,11 @@
},
"registry": {
"type": "string",
- "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)."
+ "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too."
+ },
+ "networkPoliciesEnabled": {
+ "type": "boolean",
+ "description": "EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided for core modules."
}
}
},
@@ -788,11 +792,11 @@
},
"baseDomain": {
"type": "string",
- "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class."
+ "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class."
},
"nginx": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx",
- "description": "Configurations for the nginx ingress controller package."
+ "description": "Configurations for the Ingress nginx controller package."
},
"certManager": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager",
@@ -837,14 +841,14 @@
},
"nodeSelector": {
"$ref": "#/$defs/Types.KubeNodeSelector",
- "description": "Set to override the node selector used to place the pods of the Ingress module"
+ "description": "Set to override the node selector used to place the pods of the Ingress module."
},
"tolerations": {
"type": "array",
"items": {
"$ref": "#/$defs/Types.KubeToleration"
},
- "description": "Set to override the tolerations that will be added to the pods of the Ingress module"
+ "description": "Set to override the tolerations that will be added to the pods of the Ingress module."
}
}
},
@@ -877,7 +881,7 @@
"single",
"dual"
],
- "description": "The type of the nginx ingress controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type."
+ "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`."
},
"tls": {
"$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS"
@@ -970,7 +974,7 @@
"properties": {
"name": {
"type": "string",
- "description": "Name of the clusterIssuer"
+ "description": "The name of the clusterIssuer."
},
"email": {
"type": "string",
@@ -986,7 +990,7 @@
},
"solvers": {
"type": "array",
- "description": "List of challenge solvers to use instead of the default one for the `http01` challenge."
+ "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field."
}
},
"required": [
@@ -1022,7 +1026,7 @@
"loki",
"customOutputs"
],
- "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage."
+ "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
},
"opensearch": {
"$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch"
@@ -1061,6 +1065,20 @@
]
}
},
+ {
+ "if": {
+ "properties": {
+ "type": {
+ "const": "loki"
+ }
+ }
+ },
+ "then": {
+ "required": [
+ "loki"
+ ]
+ }
+ },
{
"if": {
"properties": {
@@ -1094,7 +1112,7 @@
},
"storageSize": {
"type": "string",
- "description": "The storage size for the OpenSearch volumes."
+ "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -1106,7 +1124,7 @@
},
"Spec.Distribution.Modules.Logging.Cerebro": {
"type": "object",
- "description": "DEPRECATED in latest versions of KFD.",
+ "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.",
"additionalProperties": false,
"properties": {
"overrides": {
@@ -1182,10 +1200,18 @@
}
}
},
+ "tsdbStartDate": {
+ "type": "string",
+ "format": "date",
+ "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`."
+ },
"resources": {
"$ref": "#/$defs/Types.KubeResources"
}
- }
+ },
+ "required": [
+ "tsdbStartDate"
+ ]
},
"Spec.Distribution.Modules.Logging.Operator": {
"type": "object",
@@ -1259,7 +1285,7 @@
"prometheusAgent",
"mimir"
],
- "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
},
"overrides": {
"$ref": "#/$defs/Types.FuryModuleOverrides"
@@ -1346,7 +1372,7 @@
"properties": {
"deadManSwitchWebhookUrl": {
"type": "string",
- "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io"
+ "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
},
"installDefaultRules": {
"type": "boolean",
@@ -1426,7 +1452,7 @@
"properties": {
"endpoint": {
"type": "string",
- "description": "External S3-compatible endpoint for Mimir's storage."
+ "description": "The external S3-compatible endpoint for Mimir's storage."
},
"insecure": {
"type": "boolean",
@@ -1493,7 +1519,7 @@
"none",
"tempo"
],
- "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment."
+ "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`."
},
"tempo": {
"$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo"
@@ -1530,7 +1556,7 @@
"properties": {
"endpoint": {
"type": "string",
- "description": "External S3-compatible endpoint for Tempo's storage."
+ "description": "The external S3-compatible endpoint for Tempo's storage."
},
"insecure": {
"type": "boolean",
@@ -1603,7 +1629,7 @@
"calico",
"cilium"
],
- "description": "The type of CNI plugin to use, either `calico` (default, via the Tigera Operator) or `cilium`."
+ "description": "The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`."
}
},
"required": [
@@ -1651,7 +1677,7 @@
"gatekeeper",
"kyverno"
],
- "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`."
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`."
},
"gatekeeper": {
"$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper"
@@ -1775,7 +1801,7 @@
"none",
"on-premises"
],
- "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment."
+ "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`."
},
"velero": {
"$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero"
@@ -1847,24 +1873,57 @@
"type": "boolean",
"description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`."
},
- "cron": {
+ "definitions": {
"type": "object",
"additionalProperties": false,
- "description": "Configuration for Velero's schedules cron.",
+ "description": "Configuration for Velero schedules.",
"properties": {
"manifests": {
- "type": "string",
- "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's manifests backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ }
+ }
},
"full": {
- "type": "string",
- "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for Velero's full backup schedule.",
+ "properties": {
+ "schedule": {
+ "type": "string",
+ "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)."
+ },
+ "ttl": {
+ "type": "string",
+ "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ },
+ "snapshotMoveData": {
+ "type": "boolean",
+ "description": "EXPERIMENTAL (if you take more than one backup, the backups after the first one are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
+ }
+ }
}
}
- },
- "ttl": {
- "type": "string",
- "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL."
+ }
+ }
+ },
+ "snapshotController": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "Configuration for the additional snapshotController component installation.",
+ "properties": {
+ "install": {
+ "type": "boolean",
+ "description": "Whether to install or not the snapshotController component in the cluster. Before enabling this field, check that your CSI driver does not already include a built-in snapshotController."
}
}
},
@@ -1886,7 +1945,7 @@
},
"baseDomain": {
"type": "string",
- "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class."
+ "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class."
},
"pomerium": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium"
@@ -2041,7 +2100,7 @@
"basicAuth",
"sso"
],
- "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication."
+ "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`."
},
"basicAuth": {
"$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth"
@@ -2298,11 +2357,11 @@
"properties": {
"cpu": {
"type": "string",
- "description": "The cpu request for the loki pods"
+ "description": "The CPU request for the Pod, in cores. Example: `500m`."
},
"memory": {
"type": "string",
- "description": "The memory request for the prometheus pods"
+ "description": "The memory request for the Pod. Example: `500M`."
}
}
},
@@ -2312,11 +2371,11 @@
"properties": {
"cpu": {
"type": "string",
- "description": "The cpu limit for the loki pods"
+ "description": "The CPU limit for the Pod. Example: `1000m`."
},
"memory": {
"type": "string",
- "description": "The memory limit for the prometheus pods"
+ "description": "The memory limit for the Pod. Example: `1G`."
}
}
}
diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl
index 157f9ff37..3dd175a5d 100644
--- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl
+++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl
@@ -85,6 +85,8 @@ spec:
nodeAllowedSshPublicKey: "ssh-ed25519 XYZ"
# Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim.
nodePoolsLaunchKind: "launch_templates"
+ # Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool. Valid values are: `alinux2`, `alinux2023`.
+ nodePoolGlobalAmiType: "alinux2"
# Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
# logRetentionDays: 90
# This map defines the access to the Kubernetes API server
@@ -97,6 +99,7 @@ spec:
nodePools:
# This is the name of the nodepool
- name: infra
+ type: self-managed
# This map defines the max and min number of nodes in the nodepool autoscaling group
size:
min: 1
@@ -124,8 +127,8 @@ spec:
- node.kubernetes.io/role=infra:NoSchedule
# AWS tags that will be added to the ASG and EC2 instances, the example shows the labels needed by cluster autoscaler
tags:
- k8s.io/cluster-autoscaler/node-template/label/nodepool: "worker"
- k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/role: "worker"
+ k8s.io/cluster-autoscaler/node-template/label/nodepool: "infra"
+ k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/role: "infra"
# Optional additional firewall rules that will be attached to the nodes
#additionalFirewallRules:
# # The name of the rule
@@ -143,7 +146,7 @@ spec:
# to: 80
# # Additional AWS tags
# tags: {}
- # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more informations
+ # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more information.
awsAuth: {}
# additionalAccounts:
# - "777777777777"
@@ -209,7 +212,7 @@ spec:
# - http01:
# ingress:
# class: nginx
- # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission
+ # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission.
dns:
# the public DNS zone definition
public:
@@ -227,6 +230,9 @@ spec:
logging:
# can be opensearch, loki, customOutput or none. With none, the logging module won't be installed
type: loki
+ # configurations for the loki package
+ loki:
+ tsdbStartDate: "2024-11-20"
# configurations for the minio-ha package
minio:
# the PVC size for each minio disk, 6 disks total
diff --git a/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl b/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl
index f6af9d6b1..e2e795330 100644
--- a/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl
+++ b/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl
@@ -76,6 +76,9 @@ spec:
logging:
# can be opensearch, loki, customOutput or none. With none, the logging module won't be installed
type: loki
+ # configurations for the loki package
+ loki:
+ tsdbStartDate: "2024-11-20"
# configurations for the minio-ha package
minio:
# the PVC size for each minio disk, 6 disks total
diff --git a/templates/config/onpremises-kfd-v1alpha2.yaml.tpl b/templates/config/onpremises-kfd-v1alpha2.yaml.tpl
index f56bbdbfa..7ea97c12a 100644
--- a/templates/config/onpremises-kfd-v1alpha2.yaml.tpl
+++ b/templates/config/onpremises-kfd-v1alpha2.yaml.tpl
@@ -153,6 +153,9 @@ spec:
logging:
# can be opensearch, loki, customOutput or none. With none, the logging module won't be installed
type: loki
+ # configurations for the loki package
+ loki:
+ tsdbStartDate: "2024-11-20"
# configurations for the minio-ha package
minio:
# the PVC size for each minio disk, 6 disks total
diff --git a/templates/distribution/_helpers.tpl b/templates/distribution/_helpers.tpl
index 335146356..6f5e2a26a 100644
--- a/templates/distribution/_helpers.tpl
+++ b/templates/distribution/_helpers.tpl
@@ -42,7 +42,11 @@
$moduleNodeSelector
(index .spec.distribution.common "nodeSelector") -}}
+ {{- if and (not $nodeSelector) (index . "returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}}
+ {{- "{}" | indent $indent | trim -}}
+ {{- else -}}
{{- $nodeSelector | toYaml | indent $indent | trim -}}
+ {{- end -}}
{{- end -}}
{{- define "tolerations" -}}
@@ -70,7 +74,11 @@
$moduleTolerations
(index .spec.distribution.common "tolerations") -}}
+ {{- if and (not $tolerations) (index . "returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}}
+ {{- "[]" | indent $indent | trim -}}
+ {{- else -}}
{{- $tolerations | toYaml | indent $indent | trim -}}
+ {{- end -}}
{{- end -}}
{{ define "globalIngressClass" }}
@@ -125,7 +133,7 @@
- hosts:
- {{ template "ingressHost" . }}
{{- if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" }}
- secretName: {{ lower .package }}-tls
+ secretName: {{ lower .prefix | trimSuffix "." }}-tls
{{- end }}
{{- end }}
{{- end -}}
diff --git a/templates/distribution/manifests/auth/kustomization.yaml.tpl b/templates/distribution/manifests/auth/kustomization.yaml.tpl
index 5ab2396a8..a1e43808c 100644
--- a/templates/distribution/manifests/auth/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/auth/kustomization.yaml.tpl
@@ -17,6 +17,10 @@ resources:
- resources/ingress-infra.yml
{{- end }}
+{{ if eq .spec.distribution.common.networkPoliciesEnabled true }}
+ - policies
+{{- end }}
+
patchesStrategicMerge:
- patches/infra-nodes.yml
- patches/pomerium-ingress.yml
diff --git a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl
new file mode 100644
index 000000000..0f7a8a246
--- /dev/null
+++ b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl
@@ -0,0 +1,35 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: acme-httpsolver-ingress-nginx
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: auth
+ cluster.kfd.sighup.io/auth-provider-type: sso
+spec:
+ podSelector:
+ matchLabels:
+ app: cert-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ ports:
+ - port: 8089
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/auth/policies/common.yaml.tpl b/templates/distribution/manifests/auth/policies/common.yaml.tpl
new file mode 100644
index 000000000..1b8300e14
--- /dev/null
+++ b/templates/distribution/manifests/auth/policies/common.yaml.tpl
@@ -0,0 +1,44 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: auth
+ cluster.kfd.sighup.io/auth-provider-type: sso
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-kube-dns
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: auth
+ cluster.kfd.sighup.io/auth-provider-type: sso
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
+---
diff --git a/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl
new file mode 100644
index 000000000..49e948a8d
--- /dev/null
+++ b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl
@@ -0,0 +1,15 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+{{- if eq .spec.distribution.modules.auth.provider.type "sso" }}
+resources:
+ - common.yaml
+ - acme-http-solver.yaml
+ - pomerium.yaml
+ - prometheus-metrics.yaml
+{{- end }}
diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl
new file mode 100644
index 000000000..d610a905d
--- /dev/null
+++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl
@@ -0,0 +1,52 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: pomerium-ingress-nginx
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: auth
+ cluster.kfd.sighup.io/auth-provider-type: sso
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: pomerium
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ ports:
+ - port: 8080
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: pomerium-egress-all
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: auth
+ cluster.kfd.sighup.io/auth-provider-type: sso
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: pomerium
+ egress:
+ - {}
+---
diff --git a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl
new file mode 100644
index 000000000..355ca48dd
--- /dev/null
+++ b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl
@@ -0,0 +1,31 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: pomerium-ingress-prometheus-metrics
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: auth
+ cluster.kfd.sighup.io/auth-provider-type: sso
+spec:
+ podSelector:
+ matchLabels:
+ app: pomerium
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - protocol: TCP
+ port: 9090
+---
diff --git a/templates/distribution/manifests/aws/kustomization.yaml.tpl b/templates/distribution/manifests/aws/kustomization.yaml.tpl
index 5fc017d20..dcfc507b0 100644
--- a/templates/distribution/manifests/aws/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/aws/kustomization.yaml.tpl
@@ -13,7 +13,8 @@ resources:
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/snapshot-controller" }}
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/load-balancer-controller" }}
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/node-termination-handler" }}
- - resources/sc.yml
+ - resources/storageclasses.yml
+ - resources/snapshotclasses.yml
patchesStrategicMerge:
- patches/cluster-autoscaler.yml
diff --git a/templates/distribution/manifests/aws/resources/snapshotclasses.yml b/templates/distribution/manifests/aws/resources/snapshotclasses.yml
new file mode 100644
index 000000000..e75210305
--- /dev/null
+++ b/templates/distribution/manifests/aws/resources/snapshotclasses.yml
@@ -0,0 +1,8 @@
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshotClass
+metadata:
+ name: ebs-sc
+ labels:
+ velero.io/csi-volumesnapshot-class: "true"
+driver: ebs.csi.aws.com
+deletionPolicy: Retain
\ No newline at end of file
diff --git a/templates/distribution/manifests/aws/resources/sc.yml b/templates/distribution/manifests/aws/resources/storageclasses.yml
similarity index 100%
rename from templates/distribution/manifests/aws/resources/sc.yml
rename to templates/distribution/manifests/aws/resources/storageclasses.yml
diff --git a/templates/distribution/manifests/dr/kustomization.yaml.tpl b/templates/distribution/manifests/dr/kustomization.yaml.tpl
index f2cd569a7..f3fdea029 100644
--- a/templates/distribution/manifests/dr/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/dr/kustomization.yaml.tpl
@@ -16,8 +16,12 @@ resources:
{{- else }}
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-aws" }}
- resources/storageLocation.yaml
+ - resources/volumeSnapshotLocation.yaml
{{- end }}
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-node-agent" }}
+{{- if .spec.distribution.modules.dr.velero.snapshotController.install }}
+ - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/snapshot-controller" }}
+{{- end }}
{{- end }}
{{- if .spec.distribution.modules.dr.velero.schedules.install }}
@@ -34,13 +38,9 @@ patchesStrategicMerge:
- patches/eks-velero.yml
{{- end }}
{{- if .spec.distribution.modules.dr.velero.schedules.install }}
-{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "manifests")) }}
- patches/velero-schedule-manifests.yml
-{{- end }}
-{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "full")) }}
- patches/velero-schedule-full.yml
{{- end }}
-{{- end }}
{{- if eq .spec.distribution.common.provider.type "none" }}
{{- if eq .spec.distribution.modules.dr.velero.backend "externalEndpoint" }}
diff --git a/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl
index 271394159..834150ac0 100644
--- a/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl
+++ b/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl
@@ -61,4 +61,21 @@ spec:
{{ template "tolerations" $veleroArgs }}
{{- end }}
+
+{{- if .spec.distribution.modules.dr.velero.snapshotController.install }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: snapshot-controller
+ namespace: kube-system
+spec:
+ template:
+ spec:
+ nodeSelector:
+ {{ template "nodeSelector" $veleroArgs }}
+ tolerations:
+ {{ template "tolerations" $veleroArgs }}
+{{- end }}
+
{{- end }}
\ No newline at end of file
diff --git a/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl b/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl
index 5bf3de289..b572e61ba 100644
--- a/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl
+++ b/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl
@@ -2,7 +2,6 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "full")) }}
---
apiVersion: velero.io/v1
kind: Schedule
@@ -10,11 +9,8 @@ metadata:
name: full
namespace: kube-system
spec:
- {{- if and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "full") }}
- schedule: {{ .spec.distribution.modules.dr.velero.schedules.cron.full }}
- {{- end }}
- {{- if index .spec.distribution.modules.dr.velero.schedules "ttl" }}
+ schedule: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule }}"
template:
- ttl: {{ .spec.distribution.modules.dr.velero.schedules.ttl }}
- {{- end }}
-{{- end }}
+ ttl: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl }}"
+ snapshotMoveData: {{ .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData }}
+
diff --git a/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl b/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl
index c08006331..3441eb7fc 100644
--- a/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl
+++ b/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl
@@ -2,7 +2,6 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "manifests")) }}
---
apiVersion: velero.io/v1
kind: Schedule
@@ -10,11 +9,6 @@ metadata:
name: manifests
namespace: kube-system
spec:
- {{- if and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "manifests") }}
- schedule: {{ .spec.distribution.modules.dr.velero.schedules.cron.manifests }}
- {{- end }}
- {{- if index .spec.distribution.modules.dr.velero.schedules "ttl" }}
+ schedule: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule }}"
template:
- ttl: {{ .spec.distribution.modules.dr.velero.schedules.ttl }}
- {{- end }}
-{{- end }}
+ ttl: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl }}"
diff --git a/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl b/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl
new file mode 100644
index 000000000..5679203f9
--- /dev/null
+++ b/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl
@@ -0,0 +1,16 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: velero.io/v1
+kind: VolumeSnapshotLocation
+metadata:
+ name: default
+ namespace: kube-system
+ labels:
+ k8s-app: velero
+spec:
+ config:
+ region: custom
+ provider: aws
\ No newline at end of file
diff --git a/templates/distribution/manifests/ingress/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/kustomization.yaml.tpl
index ad0a7dae4..8225fdadc 100644
--- a/templates/distribution/manifests/ingress/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/ingress/kustomization.yaml.tpl
@@ -24,6 +24,10 @@ resources:
{{- end }}
+{{ if eq .spec.distribution.common.networkPoliciesEnabled true }}
+ - policies
+{{- end }}
+
{{- if ne .spec.distribution.modules.ingress.nginx.type "none" }}
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/ingress/katalog/forecastle" }}
{{- end }}
@@ -92,14 +96,14 @@ patchesJson6902:
group: apps
version: v1
kind: DaemonSet
- name: nginx-ingress-controller-external
+ name: ingress-nginx-controller-external
namespace: ingress-nginx
path: patchesJson/ingress-nginx.yml
- target:
group: apps
version: v1
kind: DaemonSet
- name: nginx-ingress-controller-internal
+ name: ingress-nginx-controller-internal
namespace: ingress-nginx
path: patchesJson/ingress-nginx.yml
{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
@@ -107,7 +111,7 @@ patchesJson6902:
group: apps
version: v1
kind: DaemonSet
- name: nginx-ingress-controller
+ name: ingress-nginx-controller
namespace: ingress-nginx
path: patchesJson/ingress-nginx.yml
{{- end }}
diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl
index 663d748ea..a18265814 100644
--- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl
+++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl
@@ -22,7 +22,7 @@ spec:
apiVersion: v1
kind: ConfigMap
metadata:
- name: nginx-configuration-external
+ name: ingress-nginx-controller-external
namespace: ingress-nginx
data:
use-proxy-protocol: "true"
diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl
index a7aa6f6ad..6ae0a6b14 100644
--- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl
+++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl
@@ -21,7 +21,7 @@ spec:
apiVersion: v1
kind: ConfigMap
metadata:
- name: nginx-configuration-internal
+ name: ingress-nginx-controller-internal
namespace: ingress-nginx
data:
use-proxy-protocol: "true"
diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl
index 08dc64d82..60b7771ba 100644
--- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl
+++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl
@@ -22,7 +22,7 @@ spec:
apiVersion: v1
kind: ConfigMap
metadata:
- name: nginx-configuration
+ name: ingress-nginx-controller
namespace: ingress-nginx
data:
use-proxy-protocol: "true"
diff --git a/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl
index f94fe9a6c..2ba355781 100644
--- a/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl
+++ b/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl
@@ -66,7 +66,7 @@ spec:
apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: nginx-ingress-controller-external
+ name: ingress-nginx-controller-external
namespace: ingress-nginx
spec:
template:
@@ -79,7 +79,7 @@ spec:
apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: nginx-ingress-controller-internal
+ name: ingress-nginx-controller-internal
namespace: ingress-nginx
spec:
template:
@@ -93,7 +93,7 @@ spec:
apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: nginx-ingress-controller
+ name: ingress-nginx-controller
namespace: ingress-nginx
spec:
template:
diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl
new file mode 100644
index 000000000..bbc937c2b
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl
@@ -0,0 +1,88 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# https://cert-manager.io/docs/installation/best-practice/#network-requirements
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: cert-manager-egress-kube-apiserver
+ namespace: cert-manager
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/instance: cert-manager
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: cert-manager-webhook-ingress-kube-apiserver
+ namespace: cert-manager
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: webhook
+ app.kubernetes.io/instance: cert-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - port: 10250
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: cert-manager-egress-https
+ namespace: cert-manager
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/instance: cert-manager
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+ - port: 80
+ protocol: TCP
+---
+{{- if eq .spec.distribution.modules.auth.provider.type "sso" }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: acme-http-solver-ingress-lets-encrypt
+ namespace: pomerium
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app: cert-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - port: 8089
+ protocol: TCP
+---
+{{- end }}
diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl
new file mode 100644
index 000000000..963b7db18
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl
@@ -0,0 +1,38 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: cert-manager
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-kube-dns
+ namespace: cert-manager
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - protocol: UDP
+ port: 53
+ - protocol: TCP
+ port: 53
+ # https://cert-manager.io/docs/installation/best-practice/#network-requirements
\ No newline at end of file
diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl
new file mode 100644
index 000000000..b71d8d27f
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl
@@ -0,0 +1,12 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - common.yaml
+ - cert-manager.yaml
+ - prometheus-metrics.yaml
diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl
new file mode 100644
index 000000000..c329f39e5
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl
@@ -0,0 +1,30 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: cert-manager-ingress-prometheus-metrics
+ namespace: cert-manager
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9402
+ protocol: TCP
diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl
new file mode 100644
index 000000000..d1a1f295a
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl
@@ -0,0 +1,44 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-kube-dns
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
+---
diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl
new file mode 100644
index 000000000..3bd02356e
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl
@@ -0,0 +1,22 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: external-dns-egress-all
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app: external-dns
+ policyTypes:
+ - Egress
+ egress:
+ - {}
+---
diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl
new file mode 100644
index 000000000..c223b5b3d
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl
@@ -0,0 +1,59 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: forecastle-ingress-nginx
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app: forecastle
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+{{ else }}
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+{{- end }}
+ podSelector:
+ matchLabels:
+{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ app: pomerium
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ ports:
+ - port: 3000
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: forecastle-egress-kube-apiserver
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+ app: forecastle
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
\ No newline at end of file
diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl
new file mode 100644
index 000000000..46494b30e
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl
@@ -0,0 +1,14 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - common.yaml
+ - forecastle.yaml
+ - nginx-ingress-controller.yaml
+ - prometheus-metrics.yaml
+ - external-dns.yaml
diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl
new file mode 100644
index 000000000..164cb229c
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl
@@ -0,0 +1,51 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: nginx-egress-all
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ policyTypes:
+ - Egress
+ egress:
+ - {}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-ingress-nginx
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ ingress:
+ - ports:
+ - port: 8080
+ protocol: TCP
+ - port: 8443
+ protocol: TCP
+ - port: 9443
+ protocol: TCP
+ policyTypes:
+ - Ingress
diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl
new file mode 100644
index 000000000..f070b9d54
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl
@@ -0,0 +1,33 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: nginx-ingress-prometheus-metrics
+ namespace: ingress-nginx
+ labels:
+ cluster.kfd.sighup.io/module: ingress
+ cluster.kfd.sighup.io/ingress-type: nginx
+spec:
+ podSelector:
+ matchLabels:
+{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - protocol: TCP
+ port: 10254
diff --git a/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl
new file mode 100644
index 000000000..22b97ea52
--- /dev/null
+++ b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl
@@ -0,0 +1,15 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+{{- if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" }}
+ - cert-manager
+{{ end }}
+{{- if ne .spec.distribution.modules.ingress.nginx.type "none" }}
+ - ingress-nginx
+{{ end }}
\ No newline at end of file
diff --git a/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl b/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl
index c2ca78d71..357bd2e2b 100644
--- a/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl
+++ b/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl
@@ -36,9 +36,10 @@ spec:
app: cert-manager
spec:
nodeSelector:
- {{ template "nodeSelector" $certManagerArgs }}
+ {{- /* NOTE!: merge order is important below */}}
+ {{ template "nodeSelector" ( merge (dict "returnEmptyInsteadOfNull" true) $certManagerArgs ) }}
tolerations:
- {{ template "tolerations" ( merge (dict "indent" 16) $certManagerArgs ) }}
+ {{ template "tolerations" ( merge (dict "indent" 16 "returnEmptyInsteadOfNull" true) $certManagerArgs ) }}
{{- end -}}
{{- else if .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers }}
solvers:
diff --git a/templates/distribution/manifests/logging/kustomization.yaml.tpl b/templates/distribution/manifests/logging/kustomization.yaml.tpl
index c8f361bfd..9b1b42738 100644
--- a/templates/distribution/manifests/logging/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/logging/kustomization.yaml.tpl
@@ -47,6 +47,9 @@ resources:
- {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/logging/katalog/loki-distributed" }}
{{- end }}
+{{ if eq .spec.distribution.common.networkPoliciesEnabled true }}
+ - policies
+{{- end }}
# The kustomize version we are using does not support specifing more than 1 strategicMerge patch
# in a single YAML file under the `patches` directive like the old versions did for `patchesStrategicMerge`.
diff --git a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl
index f1851f754..7d9c30240 100644
--- a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl
+++ b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl
@@ -77,6 +77,15 @@ schema_config:
object_store: s3
schema: v11
store: boltdb-shipper
+{{- if and (index .spec.distribution.modules.logging "loki") (index .spec.distribution.modules.logging.loki "tsdbStartDate") }}
+ - from: "{{ .spec.distribution.modules.logging.loki.tsdbStartDate }}"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v13
+ store: tsdb
+{{- end }}
server:
http_listen_port: 3100
storage_config:
@@ -95,6 +104,12 @@ storage_config:
cache_ttl: 24h
resync_interval: 5s
shared_store: s3
+ tsdb_shipper:
+ active_index_directory: /var/loki/index
+ cache_location: /var/loki/cache
+ cache_ttl: 24h
+ resync_interval: 5s
+ shared_store: s3
filesystem:
directory: /var/loki/chunks
table_manager:
diff --git a/templates/distribution/manifests/logging/patches/minio.root.env.tpl b/templates/distribution/manifests/logging/patches/minio.root.env.tpl
index e1ed7291c..0458f94b2 100644
--- a/templates/distribution/manifests/logging/patches/minio.root.env.tpl
+++ b/templates/distribution/manifests/logging/patches/minio.root.env.tpl
@@ -1,2 +1,2 @@
-ROOT_PASSWORD={{ .spec.distribution.modules.logging.minio.rootUser.password }}
-ROOT_USER={{ .spec.distribution.modules.logging.minio.rootUser.username }}
+rootPassword={{ .spec.distribution.modules.logging.minio.rootUser.password }}
+rootUser={{ .spec.distribution.modules.logging.minio.rootUser.username }}
diff --git a/templates/distribution/manifests/logging/policies/common.yaml.tpl b/templates/distribution/manifests/logging/policies/common.yaml.tpl
new file mode 100644
index 000000000..6fd8ddaad
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/common.yaml.tpl
@@ -0,0 +1,41 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-kube-dns
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
diff --git a/templates/distribution/manifests/logging/policies/configs.yaml.tpl b/templates/distribution/manifests/logging/policies/configs.yaml.tpl
new file mode 100644
index 000000000..05ff5e2d5
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/configs.yaml.tpl
@@ -0,0 +1,23 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: event-tailer-egress-kube-apiserver
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: event-tailer
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl
new file mode 100644
index 000000000..48f6095a0
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl
@@ -0,0 +1,66 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fluentbit-egress-fluentd
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentbit
+ egress:
+ - ports:
+ # fluentd
+ - port: 24240
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fluentbit-egress-kube-apiserver
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentbit
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fluentbit-ingress-prometheus-metrics
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentbit
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ ports:
+ - port: 2020
+ protocol: TCP
diff --git a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl
new file mode 100644
index 000000000..95adfac59
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl
@@ -0,0 +1,73 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fluentd-egress-all
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentd
+ egress:
+ - {}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fluentd-ingress-fluentbit
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentd
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentbit
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ ports:
+ - port: 24240
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: fluentd-ingress-prometheus-metrics
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentd
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ ports:
+ - port: 24231
+ protocol: TCP
+---
+
diff --git a/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl b/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl
new file mode 100644
index 000000000..17f67c2ef
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl
@@ -0,0 +1,26 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+{{ $loggingType := .spec.distribution.modules.logging.type }}
+
+resources:
+ - common.yaml
+ - configs.yaml
+ - fluentbit.yaml
+ - fluentd.yaml
+ - logging-operator.yaml
+ - minio.yaml
+
+{{- if eq $loggingType "loki" }}
+ - loki.yaml
+{{- end }}
+
+{{- if eq $loggingType "opensearch" }}
+ - opensearch-dashboards.yaml
+ - opensearch.yaml
+{{- end }}
diff --git a/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl
new file mode 100644
index 000000000..bc0a2cccd
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl
@@ -0,0 +1,22 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: logging-operator-egress-kube-apiserver
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: logging-operator
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl
new file mode 100644
index 000000000..7bae584c3
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl
@@ -0,0 +1,150 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: loki-distributed-ingress-fluentd
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: loki
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+ app.kubernetes.io/component: gateway
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentd
+ ports:
+ - port: 8080
+ protocol: TCP
+ - port: 3100
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: loki-distributed-ingress-grafana
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: loki
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+ app.kubernetes.io/component: gateway
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: grafana
+ ports:
+ - port: 8080
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: loki-distributed-ingress-prometheus-metrics
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: loki
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+ ingress:
+ - ports:
+ - port: 3100
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: loki-distributed-discovery
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: loki
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+ ingress:
+ - ports:
+ - port: 9095
+ protocol: TCP
+ - port: 3100
+ protocol: TCP
+ - port: 7946
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+ egress:
+ - ports:
+ - port: 9095
+ protocol: TCP
+ - port: 3100
+ protocol: TCP
+ - port: 7946
+ protocol: TCP
+ to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: loki-distributed-egress-all
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: loki
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: loki-distributed
+ egress:
+ - {}
+---
diff --git a/templates/distribution/manifests/logging/policies/minio.yaml.tpl b/templates/distribution/manifests/logging/policies/minio.yaml.tpl
new file mode 100644
index 000000000..09c6ffa34
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/minio.yaml.tpl
@@ -0,0 +1,178 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-namespace
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ ports:
+ - port: 9000
+ protocol: TCP
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app: minio
+ ports:
+ - port: 9000
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-buckets-setup-egress-kube-apiserver
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio-logging-buckets-setup
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-buckets-setup-egress-minio
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio-logging-buckets-setup
+ egress:
+ - ports:
+ - port: 9000
+ protocol: TCP
+ to:
+ - podSelector:
+ matchLabels:
+ app: minio
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-prometheus-metrics
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+ - ports:
+ - port: 9000
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-egress-https
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-nginx
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+# single nginx, no sso
+{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+{{ end }}
+ ports:
+ - port: 9001
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl
new file mode 100644
index 000000000..6a8fb98cc
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl
@@ -0,0 +1,118 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-dashboards-egress-opensearch
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: opensearch-dashboards
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ports:
+ - port: 9200
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-dashboards-ingress-jobs
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: opensearch-dashboards
+ release: opensearch-dashboards
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch-dashboards
+ app.kubernetes.io/instance: opensearch-dashboards
+ ports:
+ - port: 5601
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-dashboards-ingress-nginx
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: opensearch-dashboards
+ ingress:
+ - from:
+ - namespaceSelector:
+{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+{{ else }}
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+{{- end }}
+ podSelector:
+ matchLabels:
+{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ app: pomerium
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
+ app: ingress
+{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }}
+ app: ingress-nginx
+{{- end }}
+ ports:
+ - port: 5601
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: jobs-egress-opensearch-dashboards
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch-dashboards
+ app.kubernetes.io/instance: opensearch-dashboards
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: opensearch-dashboards
+ release: opensearch-dashboards
+ ports:
+ - port: 5601
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl
new file mode 100644
index 000000000..fccfeae54
--- /dev/null
+++ b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl
@@ -0,0 +1,169 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-ingress-dashboards
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app: opensearch-dashboards
+ ports:
+ - port: 9200
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-ingress-fluentd
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: fluentd
+ ports:
+ - port: 9200
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-discovery
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ports:
+ - port: 9300
+ protocol: TCP
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ports:
+ - port: 9300
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-ingress-prometheus-metrics
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9108
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: opensearch-ingress-jobs
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ app.kubernetes.io/instance: opensearch
+ ports:
+ - port: 9200
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: jobs-egress-opensearch
+ namespace: logging
+ labels:
+ cluster.kfd.sighup.io/module: logging
+ cluster.kfd.sighup.io/logging-type: opensearch
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ app.kubernetes.io/instance: opensearch
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: opensearch
+ ports:
+ - port: 9200
+ protocol: TCP
+---
\ No newline at end of file
diff --git a/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl
index d2bb75201..83a453240 100644
--- a/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl
+++ b/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl
@@ -65,7 +65,7 @@ metadata:
forecastle.stakater.com/icon: "https://min.io/resources/img/logo/MINIO_Bird.png"
{{ if not .spec.distribution.modules.logging.overrides.ingresses.minio.disableAuth }}{{ template "ingressAuth" . }}{{ end }}
{{ template "certManagerClusterIssuer" . }}
-
+
{{ if and (not .spec.distribution.modules.logging.overrides.ingresses.minio.disableAuth) (eq .spec.distribution.modules.auth.provider.type "sso") }}
name: minio-logging
namespace: pomerium
@@ -93,4 +93,4 @@ spec:
port:
name: http
{{ end }}
-{{- template "ingressTls" (dict "module" "logging" "package" "minio-logging" "prefix" "minio-logging." "spec" .spec) }}
+{{- template "ingressTls" (dict "module" "logging" "package" "minio" "prefix" "minio-logging." "spec" .spec) }}
diff --git a/templates/distribution/manifests/monitoring/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/kustomization.yaml.tpl
index 9297778b3..955daf763 100644
--- a/templates/distribution/manifests/monitoring/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/monitoring/kustomization.yaml.tpl
@@ -54,6 +54,10 @@ resources:
- secrets/alertmanager.yml
{{- end }}
+{{ if eq .spec.distribution.common.networkPoliciesEnabled true }}
+ - policies
+{{- end }}
+
patchesStrategicMerge:
- patches/infra-nodes.yml
{{- if eq .spec.distribution.common.provider.type "eks" }}{{/* in EKS there are no files to monitor on nodes */}}
diff --git a/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl b/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl
index 15de617fa..0dec09ed2 100644
--- a/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl
+++ b/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl
@@ -1,2 +1,2 @@
-ROOT_PASSWORD={{ .spec.distribution.modules.monitoring.minio.rootUser.password }}
-ROOT_USER={{ .spec.distribution.modules.monitoring.minio.rootUser.username }}
+rootPassword={{ .spec.distribution.modules.monitoring.minio.rootUser.password }}
+rootUser={{ .spec.distribution.modules.monitoring.minio.rootUser.username }}
diff --git a/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl
new file mode 100644
index 000000000..2ed8a7215
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl
@@ -0,0 +1,44 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/alertmanager-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: alertmanager-main
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9093
+ protocol: TCP
+ - port: 8080
+ protocol: TCP
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: alertmanager
+ ports:
+ - port: 9094
+ protocol: TCP
+ - port: 9094
+ protocol: UDP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: alert-router
+ app.kubernetes.io/instance: main
+ app.kubernetes.io/name: alertmanager
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+
\ No newline at end of file
diff --git a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl
new file mode 100644
index 000000000..c8b4745c7
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl
@@ -0,0 +1,35 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/blackboxExporter-networkPolicy.yaml
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: blackbox-exporter
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9115
+ protocol: TCP
+ - port: 19115
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: exporter
+ app.kubernetes.io/name: blackbox-exporter
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+
\ No newline at end of file
diff --git a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl
new file mode 100644
index 000000000..9ca8ec757
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl
@@ -0,0 +1,44 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-kube-dns
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
+ - protocol: TCP
+ port: 53
+
\ No newline at end of file
diff --git a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl
new file mode 100644
index 000000000..95b548e7b
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl
@@ -0,0 +1,82 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+{{- $monitoringType := .spec.distribution.modules.monitoring.type }}
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/grafana-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: grafana
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 3000
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: grafana
+ app.kubernetes.io/name: grafana
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: grafana-ingress-nginx
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: grafana
+ app.kubernetes.io/name: grafana
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Ingress
+ ingress:
+# single nginx, no sso
+{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+{{ end }}
+ ports:
+ - port: 3000
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl
new file mode 100644
index 000000000..759609694
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl
@@ -0,0 +1,103 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-ingress-nginx
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: prometheus
+ app.kubernetes.io/instance: k8s
+ app.kubernetes.io/name: prometheus
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Ingress
+ ingress:
+# single nginx, no sso
+{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+{{ end }}
+ ports:
+ - port: 9090
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: alertmanager-ingress-nginx
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: alert-router
+ app.kubernetes.io/instance: main
+ app.kubernetes.io/name: alertmanager
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Ingress
+ ingress:
+# single nginx, no sso
+{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+{{ end }}
+ ports:
+ - port: 9093
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl
new file mode 100644
index 000000000..0851cf907
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl
@@ -0,0 +1,34 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/kubeStateMetrics-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kube-state-metrics
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 8443
+ protocol: TCP
+ - port: 9443
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: exporter
+ app.kubernetes.io/name: kube-state-metrics
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+
\ No newline at end of file
diff --git a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl
new file mode 100644
index 000000000..0fa4c4391
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl
@@ -0,0 +1,33 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+{{- $monitoringType := .spec.distribution.modules.monitoring.type }}
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - common.yaml
+ - prometheus-operator.yaml
+ - kube-state-metrics.yaml
+ - node-exporter.yaml
+ - x509-exporter.yaml
+ - blackbox-exporter.yaml
+
+{{- if or (eq $monitoringType "prometheus") (eq $monitoringType "mimir") }}
+ - alertmanager.yaml
+ - prometheus-adapter.yaml
+ - grafana.yaml
+ - prometheus.yaml
+{{- end }}
+{{- if eq $monitoringType "mimir" }}
+ - mimir.yaml
+{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }}
+ - minio.yaml
+{{- end }}
+{{- end }}
+
+{{- if ne .spec.distribution.modules.ingress.nginx.type "none" }}{{/* we don't need ingresses for Prometheus in Agent mode */}}
+ - ingress.yaml
+{{- end }}
diff --git a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl
new file mode 100644
index 000000000..77dd0149d
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl
@@ -0,0 +1,191 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-distributed-ingress-prometheus-metrics
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: mimir
+ ingress:
+ - ports:
+ - port: 8080
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-distributed-discovery
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: mimir
+ ingress:
+ - ports:
+ - port: 9095
+ protocol: TCP
+ - port: 7946
+ protocol: TCP
+ - port: 8080
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: mimir
+ egress:
+ - ports:
+ - port: 9095
+ protocol: TCP
+ - port: 7946
+ protocol: TCP
+ - port: 8080
+ protocol: TCP
+ to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: mimir
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-gateway-ingress-grafana
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: gateway
+ app.kubernetes.io/instance: mimir-distributed
+ app.kubernetes.io/name: mimir
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: grafana
+ app.kubernetes.io/component: grafana
+ ports:
+ - port: 8080
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-querier-egress-https
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/instance: mimir-distributed
+ app.kubernetes.io/name: mimir
+ app.kubernetes.io/component: querier
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-ingester-egress-https
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/instance: mimir-distributed
+ app.kubernetes.io/name: mimir
+ app.kubernetes.io/component: ingester
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-distributed-egress-minio
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: mimir
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: minio
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ ports:
+ - port: 9000
+ protocol: TCP
+{{- else }}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: mimir-distributed-egress-all
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-type: mimir
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: mimir
+ egress:
+ - {}
+{{- end }}
diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl
new file mode 100644
index 000000000..2af4eae0e
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl
@@ -0,0 +1,178 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-namespace
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ ports:
+ - port: 9000
+ protocol: TCP
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app: minio
+ ports:
+ - port: 9000
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-buckets-setup-egress-kube-apiserver
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio-monitoring-buckets-setup
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-buckets-setup-egress-minio
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio-monitoring-buckets-setup
+ egress:
+ - ports:
+ - port: 9000
+ protocol: TCP
+ to:
+ - podSelector:
+ matchLabels:
+ app: minio
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-prometheus-metrics
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+ - ports:
+ - port: 9000
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-monitoring-egress-https
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-nginx
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+ cluster.kfd.sighup.io/monitoring-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+# single nginx, no sso
+{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+{{ end }}
+ ports:
+ - port: 9001
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl
new file mode 100644
index 000000000..4b06c7ece
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl
@@ -0,0 +1,32 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/nodeExporter-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: node-exporter
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9100
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: exporter
+ app.kubernetes.io/name: node-exporter
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+
diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl
new file mode 100644
index 000000000..7f26d2dd5
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl
@@ -0,0 +1,50 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheusAdapter-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-adapter
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - {}
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: metrics-adapter
+ app.kubernetes.io/name: prometheus-adapter
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-ingress-prometheus-adapter
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/component: metrics-adapter
+ app.kubernetes.io/name: prometheus-adapter
+ app.kubernetes.io/part-of: kube-prometheus
+ ports:
+ - port: 9090
+ protocol: TCP
+---
diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl
new file mode 100644
index 000000000..d33974f30
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl
@@ -0,0 +1,32 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheusOperator-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-operator
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 8443
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: controller
+ app.kubernetes.io/name: prometheus-operator
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+
diff --git a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl
new file mode 100644
index 000000000..9d5fee209
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl
@@ -0,0 +1,166 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+{{- $monitoringType := .spec.distribution.modules.monitoring.type }}
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheus-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-k8s
+ namespace: monitoring
+spec:
+ egress:
+ - {}
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9090
+ protocol: TCP
+ - port: 8080
+ protocol: TCP
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus-adapter
+ ports:
+ - port: 9090
+ protocol: TCP
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: grafana
+ ports:
+ - port: 9090
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: prometheus
+ app.kubernetes.io/instance: k8s
+ app.kubernetes.io/name: prometheus
+ app.kubernetes.io/part-of: kube-prometheus
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-egress-minio
+ namespace: monitoring
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: minio
+ ports:
+ - port: 9000
+ protocol: TCP
+---
+{{- if eq $monitoringType "mimir" }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-egress-mimir
+ namespace: monitoring
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ app.kubernetes.io/instance: k8s
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/component: gateway
+ app.kubernetes.io/name: mimir
+ app.kubernetes.io/instance: mimir-distributed
+ ports:
+ - port: 8080
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-egress-kube-apiserver
+ namespace: monitoring
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+ - port: 8405
+ protocol: TCP
+---
+{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-egress-miniologging
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: logging
+ podSelector:
+ matchLabels:
+ app: minio
+ ports:
+ - port: 9000
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: prometheus-egress-minio-monitoring
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ app.kubernetes.io/instance: k8s
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app: minio
+ ports:
+ - port: 9000
+ protocol: TCP
+---
+{{- end }}
+{{- end }}
+
diff --git a/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl
new file mode 100644
index 000000000..a89c3f207
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl
@@ -0,0 +1,45 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: x509-exporter-egress-kube-apiserver
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: x509-certificate-exporter
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: x509-exporter-ingress-prometheus-metrics
+ namespace: monitoring
+ labels:
+ cluster.kfd.sighup.io/module: monitoring
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: x509-certificate-exporter
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - port: 9793
+ protocol: TCP
+
diff --git a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl
index 0ea55cad5..ef87719f5 100644
--- a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl
+++ b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl
@@ -177,11 +177,11 @@ spec:
number: 80
{{ else }}
service:
- name: minio-tracing-console
+ name: minio-monitoring-console
port:
name: http
{{ end }}
-{{- template "ingressTls" (dict "module" "monitoring" "package" "minio-monitoring" "prefix" "minio-monitoring." "spec" .spec) }}
+{{- template "ingressTls" (dict "module" "monitoring" "package" "minio" "prefix" "minio-monitoring." "spec" .spec) }}
{{- end }}
{{- end }}
diff --git a/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl b/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl
index 062a63c94..a616ece68 100644
--- a/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl
+++ b/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl
@@ -30,9 +30,7 @@ spec:
probeSelector: {}
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
- scrapeConfigSelector:
- matchLabels:
- prometheus: k8s
+ scrapeConfigSelector: {}
{{- $prometheusAgentArgs := dict "module" "monitoring" "package" "prometheusAgent" "spec" .spec }}
tolerations:
diff --git a/templates/distribution/manifests/opa/kustomization.yaml.tpl b/templates/distribution/manifests/opa/kustomization.yaml.tpl
index 6b6672a33..b31532d5b 100644
--- a/templates/distribution/manifests/opa/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/opa/kustomization.yaml.tpl
@@ -27,6 +27,10 @@ resources:
{{- end }}
{{- end }}
+{{ if eq .spec.distribution.common.networkPoliciesEnabled true }}
+ - policies
+{{- end }}
+
patchesStrategicMerge:
- patches/infra-nodes.yml
{{- if .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces }}
diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl
new file mode 100644
index 000000000..10f8a1e52
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl
@@ -0,0 +1,22 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: audit-controller-egress-kube-apiserver
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: audit-controller
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl
new file mode 100644
index 000000000..ad51c243a
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl
@@ -0,0 +1,43 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-dns
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl
new file mode 100644
index 000000000..75fed7196
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl
@@ -0,0 +1,43 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: controller-manager-egress-kube-apiserver
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: controller-manager
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: controller-manager-ingress-kube-apiserver
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels:
+ control-plane: controller-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 8443
+ - protocol: TCP
+ port: 443
diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl
new file mode 100644
index 000000000..84557ba3e
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl
@@ -0,0 +1,48 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: gpm-egress-kube-apiserver
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels:
+ app: gatekeeper-policy-manager
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: gpm-ingress-pomerium
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels:
+ app: gatekeeper-policy-manager
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+ ports:
+ - protocol: TCP
+ port: 8080
diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl
new file mode 100644
index 000000000..79f5cfce0
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl
@@ -0,0 +1,15 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - common.yaml
+ - audit.yaml
+ - controller-manager.yaml
+ - gatekeeper-policy-manager.yaml
+ - prometheus-metrics.yaml
+
diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl
new file mode 100644
index 000000000..44cd7a68b
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl
@@ -0,0 +1,29 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: gatekeeper-ingress-prometheus-metrics
+ namespace: gatekeeper-system
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: gatekeeper
+spec:
+ podSelector:
+ matchLabels:
+ gatekeeper.sh/system: "yes"
+ policyTypes:
+ - Ingress
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+ ports:
+ - protocol: TCP
+ port: 8888
diff --git a/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl
new file mode 100644
index 000000000..aed10dc32
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl
@@ -0,0 +1,16 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+{{- if eq .spec.distribution.modules.policy.type "gatekeeper" }}
+ - gatekeeper
+{{- end }}
+{{- if eq .spec.distribution.modules.policy.type "kyverno" }}
+ - kyverno
+{{- end }}
+
diff --git a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl
new file mode 100644
index 000000000..ccb1424a9
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl
@@ -0,0 +1,42 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-dns
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
diff --git a/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl
new file mode 100644
index 000000000..77a88b0bb
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl
@@ -0,0 +1,11 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - common.yaml
+ - kyverno.yaml
diff --git a/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl
new file mode 100644
index 000000000..ff8c06b24
--- /dev/null
+++ b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl
@@ -0,0 +1,117 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kyverno-admission-egress-kube-apiserver
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: admission-controller
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 6443
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kyverno-admission-ingress-nodes
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: admission-controller
+ policyTypes:
+ - Ingress
+ ingress:
+ - ports:
+ - protocol: TCP
+ port: 9443
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kyverno-background-egress-kube-apiserver
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: background-controller
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 6443
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kyverno-reports-egress-kube-apiserver
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: reports-controller
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 6443
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kyverno-cleanup-egress-kube-apiserver
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: cleanup-controller
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 6443
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: kyverno-cleanup-reports-egress-kube-apiserver
+ namespace: kyverno
+ labels:
+ cluster.kfd.sighup.io/module: opa
+ cluster.kfd.sighup.io/policy-type: kyverno
+spec:
+ podSelector:
+ matchExpressions:
+ - { key: "batch.kubernetes.io/job-name", operator: "Exists" }
+ policyTypes:
+ - Egress
+ egress:
+ - ports:
+ - protocol: TCP
+ port: 6443
diff --git a/templates/distribution/manifests/tracing/kustomization.yaml.tpl b/templates/distribution/manifests/tracing/kustomization.yaml.tpl
index bf54f6130..aab87047e 100644
--- a/templates/distribution/manifests/tracing/kustomization.yaml.tpl
+++ b/templates/distribution/manifests/tracing/kustomization.yaml.tpl
@@ -17,6 +17,10 @@ resources:
{{- end }}
{{- end }}
+{{ if eq .spec.distribution.common.networkPoliciesEnabled true }}
+ - policies
+{{- end }}
+
patchesStrategicMerge:
- patches/infra-nodes.yml
{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }}
diff --git a/templates/distribution/manifests/tracing/patches/minio.root.env.tpl b/templates/distribution/manifests/tracing/patches/minio.root.env.tpl
index a63a82680..333ad7378 100644
--- a/templates/distribution/manifests/tracing/patches/minio.root.env.tpl
+++ b/templates/distribution/manifests/tracing/patches/minio.root.env.tpl
@@ -1,2 +1,2 @@
-ROOT_PASSWORD={{ .spec.distribution.modules.tracing.minio.rootUser.password }}
-ROOT_USER={{ .spec.distribution.modules.tracing.minio.rootUser.username }}
+rootPassword={{ .spec.distribution.modules.tracing.minio.rootUser.password }}
+rootUser={{ .spec.distribution.modules.tracing.minio.rootUser.username }}
diff --git a/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl b/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl
index b1cd52196..99ac37a25 100644
--- a/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl
+++ b/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl
@@ -86,13 +86,12 @@ querier:
trace_by_id:
query_timeout: 10s
query_frontend:
+ max_outstanding_per_tenant: 2000
max_retries: 2
search:
concurrent_jobs: 1000
target_bytes_per_job: 104857600
trace_by_id:
- hedge_requests_at: 2s
- hedge_requests_up_to: 2
query_shards: 50
server:
grpc_server_max_recv_msg_size: 4194304
diff --git a/templates/distribution/manifests/tracing/policies/common.yaml.tpl b/templates/distribution/manifests/tracing/policies/common.yaml.tpl
new file mode 100644
index 000000000..6727129eb
--- /dev/null
+++ b/templates/distribution/manifests/tracing/policies/common.yaml.tpl
@@ -0,0 +1,42 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: deny-all
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ podSelector: {}
+ policyTypes:
+ - Egress
+ - Ingress
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-kube-dns
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ podSelector:
+ matchLabels: {}
+ policyTypes:
+ - Egress
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: kube-system
+ podSelector:
+ matchLabels:
+ k8s-app: kube-dns
+ ports:
+ - protocol: UDP
+ port: 53
+
diff --git a/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl b/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl
new file mode 100644
index 000000000..96e0dff5c
--- /dev/null
+++ b/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl
@@ -0,0 +1,14 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - common.yaml
+ - tempo.yaml
+{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }}
+ - minio.yaml
+{{- end }}
diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl
new file mode 100644
index 000000000..9e4244d78
--- /dev/null
+++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl
@@ -0,0 +1,177 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-namespace
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: tracing
+ ports:
+ - port: 9000
+ protocol: TCP
+ egress:
+ - to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: tracing
+ podSelector:
+ matchLabels:
+ app: minio
+ ports:
+ - port: 9000
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-buckets-setup-egress-kube-apiserver
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio-tracing-buckets-setup
+ egress:
+ - ports:
+ - port: 6443
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-buckets-setup-egress-minio
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio-tracing-buckets-setup
+ egress:
+ - ports:
+ - port: 9000
+ protocol: TCP
+ to:
+ - podSelector:
+ matchLabels:
+ app: minio
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: tracing
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-prometheus-metrics
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+ - ports:
+ - port: 9000
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-ingress-pomerium
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app: minio
+ ingress:
+# single nginx, no sso
+{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: ingress-nginx
+ podSelector:
+ matchLabels:
+ app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: pomerium
+ podSelector:
+ matchLabels:
+ app: pomerium
+{{ end }}
+ ports:
+ - port: 9001
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: minio-egress-https
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app: minio
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+---
\ No newline at end of file
diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl
new file mode 100644
index 000000000..09528ec3d
--- /dev/null
+++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl
@@ -0,0 +1,255 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-distributed-discovery
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Ingress
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ ingress:
+ - ports:
+ - port: 9095
+ protocol: TCP
+ - port: 7946
+ protocol: TCP
+ - port: 3100
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: tracing
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ egress:
+ - ports:
+ - port: 9095
+ protocol: TCP
+ - port: 7946
+ protocol: TCP
+ - port: 3100
+ protocol: TCP
+ to:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: tracing
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-gateway-ingress-grafana
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: gateway
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/instance: tempo-distributed
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/component: grafana
+ app.kubernetes.io/name: grafana
+ app.kubernetes.io/part-of: kube-prometheus
+ ports:
+ - port: 8080
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: all-egress-tempo-distributor
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Egress
+ podSelector: {}
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/component: distributor
+ ports:
+ - port: 4317
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-distributor-ingress-traces
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/component: distributor
+ ingress:
+ - ports:
+ - port: 4317
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-components-egress-memcached
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/instance: tempo-distributed
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/component: memcached
+ ports:
+ - port: 11211
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: memcached-ingress-querier
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/component: memcached
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/component: querier
+ ports:
+ - port: 11211
+ protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-distributed-ingress-prometheus-metrics
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Ingress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ ingress:
+ - ports:
+ - port: 3100
+ protocol: TCP
+ from:
+ - namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: monitoring
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: prometheus
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-components-egress-https
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ app.kubernetes.io/instance: tempo-distributed
+ egress:
+ - ports:
+ - port: 443
+ protocol: TCP
+{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-distributed-egress-minio
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+ cluster.kfd.sighup.io/tracing-backend: minio
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ egress:
+ - to:
+ - podSelector:
+ matchLabels:
+ app: minio
+ namespaceSelector:
+ matchLabels:
+ kubernetes.io/metadata.name: tracing
+ ports:
+ - port: 9000
+ protocol: TCP
+{{- else }}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: tempo-distributed-egress-all
+ namespace: tracing
+ labels:
+ cluster.kfd.sighup.io/module: tracing
+spec:
+ policyTypes:
+ - Egress
+ podSelector:
+ matchLabels:
+ app.kubernetes.io/name: tempo
+ egress:
+ - {}
+{{- end }}
+---
diff --git a/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl
index 362180241..adff73665 100644
--- a/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl
+++ b/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl
@@ -46,4 +46,4 @@ spec:
port:
name: http
{{ end }}
-{{- template "ingressTls" (dict "module" "tracing" "package" "minio-tracing" "prefix" "minio-tracing." "spec" .spec) }}
+{{- template "ingressTls" (dict "module" "tracing" "package" "minio" "prefix" "minio-tracing." "spec" .spec) }}
diff --git a/templates/distribution/scripts/apply.sh.tpl b/templates/distribution/scripts/apply.sh.tpl
index e04b62446..a8d0c883f 100644
--- a/templates/distribution/scripts/apply.sh.tpl
+++ b/templates/distribution/scripts/apply.sh.tpl
@@ -43,7 +43,7 @@ $kubectlbin create namespace calico-system --dry-run=client -o yaml | $kubectlbi
< out.yaml $yqbin 'select(.kind == "CustomResourceDefinition")' | $kubectlbin apply -f - --server-side
< out.yaml $yqbin 'select(.kind == "CustomResourceDefinition")' | $kubectlbin wait --for condition=established --timeout=60s -f -
-echo "Clean up init jobs, since they cannot be changed without conficts and they are idempotent by nature..."
+echo "Clean up old init jobs..."
$kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-setup -n kube-system
$kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-logging-buckets-setup -n logging
@@ -62,14 +62,14 @@ $kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-tracing-bu
| $kubectlbin apply -f - --server-side
{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }}
-$kubectlbin rollout status daemonset nginx-ingress-controller-external -n ingress-nginx --timeout=180s
+$kubectlbin rollout status daemonset ingress-nginx-controller-external -n ingress-nginx --timeout=180s
-$kubectlbin rollout status daemonset nginx-ingress-controller-internal -n ingress-nginx --timeout=180s
+$kubectlbin rollout status daemonset ingress-nginx-controller-internal -n ingress-nginx --timeout=180s
{{- end }}
{{- if eq .spec.distribution.modules.ingress.nginx.type "single" }}
-$kubectlbin rollout status daemonset nginx-ingress-controller -n ingress-nginx --timeout=180s
+$kubectlbin rollout status daemonset ingress-nginx-controller -n ingress-nginx --timeout=180s
{{- end }}
diff --git a/templates/distribution/scripts/pre-apply.sh.tpl b/templates/distribution/scripts/pre-apply.sh.tpl
index 2ab5185ec..6b41a562a 100644
--- a/templates/distribution/scripts/pre-apply.sh.tpl
+++ b/templates/distribution/scripts/pre-apply.sh.tpl
@@ -17,6 +17,21 @@ vendorPath="{{ .paths.vendorPath }}"
# Text generated with: https://www.patorjk.com/software/taag/#p=display&f=ANSI%20Regular&t=TRACING%20TYPE
+# ███    ██ ███████ ████████ ██     ██  ██████  ██████  ██   ██    ██████   ██████  ██      ██  ██████ ██ ███████ ███████
+# ████   ██ ██         ██    ██     ██ ██    ██ ██   ██ ██  ██     ██   ██ ██    ██ ██      ██ ██      ██ ██      ██
+# ██ ██  ██ █████      ██    ██  █  ██ ██    ██ ██████  █████      ██████  ██    ██ ██      ██ ██      ██ █████   ███████
+# ██  ██ ██ ██         ██    ██ ███ ██ ██    ██ ██   ██ ██  ██     ██      ██    ██ ██      ██ ██      ██ ██           ██
+# ██   ████ ███████    ██     ███ ███   ██████  ██   ██ ██   ██    ██       ██████  ███████ ██  ██████ ██ ███████ ███████
+
+{{- if index .reducers "distributionCommonNetworkPoliciesEnabled" }}
+
+{{- if eq .reducers.distributionCommonNetworkPoliciesEnabled.to false }}
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/module
+ echo "KFD Network Policies deleted"
+{{- end }}
+
+{{- end }}
+
# ██       ██████   ██████   ██████  ██ ███    ██  ██████
# ██      ██    ██ ██       ██       ██ ████   ██ ██
# ██      ██    ██ ██   ███ ██   ███ ██ ██ ██  ██ ██   ███
@@ -29,8 +44,8 @@ deleteOpensearch() {
$kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n logging opensearch-dashboards
$kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n pomerium opensearch-dashboards
- $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-dashboards > delete-opensearch.yaml
- $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-triple >> delete-opensearch.yaml
+ $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-dashboards > delete-opensearch-dashboards.yaml
+ $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-triple > delete-opensearch.yaml
{{- if eq .spec.distribution.modules.monitoring.type "none" }}
if ! $kubectlbin get apiservice v1.monitoring.coreos.com; then
@@ -41,6 +56,8 @@ deleteOpensearch() {
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch.yaml
$kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=opensearch pvc -n logging --wait --timeout=180s
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch-dashboards.yaml
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-type=opensearch
echo "OpenSearch resources deleted"
}
@@ -57,6 +74,7 @@ deleteLoki() {
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-loki.yaml
$kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=loki-distributed pvc -n logging --wait --timeout=180s
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-type=loki
echo "Loki resources deleted"
}
@@ -81,6 +99,7 @@ $kustomizebin build $vendorPath/modules/logging/katalog/minio-ha > delete-loggin
fi
{{- end }}
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-logging-minio-ha.yaml
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-backend=minio
echo "Minio Logging deleted"
}
@@ -165,7 +184,7 @@ deleteGatekeeper() {
$kustomizebin build $vendorPath/modules/opa/katalog/gatekeeper/monitoring | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f -
{{- end }}
$kustomizebin build $vendorPath/modules/opa/katalog/gatekeeper/core | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f -
-
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/policy-type=gatekeeper
echo "Gatekeeper resources deleted"
}
@@ -173,6 +192,7 @@ deleteKyverno() {
$kustomizebin build $vendorPath/modules/opa/katalog/kyverno | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f -
$kubectlbin delete --ignore-not-found --wait --timeout=180s validatingwebhookconfiguration -l webhook.kyverno.io/managed-by=kyverno
$kubectlbin delete --ignore-not-found --wait --timeout=180s mutatingwebhookconfiguration -l webhook.kyverno.io/managed-by=kyverno
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/policy-type=kyverno
echo "Kyverno resources deleted"
}
@@ -295,6 +315,7 @@ deleteTracingMinioHA() {
fi
{{- end }}
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-tracing-minio-ha.yaml
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/tracing-backend=minio
echo "Minio HA on tracing namespace deleted"
}
@@ -558,8 +579,8 @@ deleteNginx() {
$kustomizebin build $vendorPath/modules/ingress/katalog/nginx > delete-nginx.yaml
$kustomizebin build $vendorPath/modules/ingress/katalog/dual-nginx > delete-dual-nginx.yaml
- $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/public > delete-external-dns.yaml
- $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/private >> delete-external-dns.yaml
+ $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/public > delete-external-dns-public.yaml
+ $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/private > delete-external-dns-private.yaml
$kustomizebin build $vendorPath/modules/ingress/katalog/forecastle > delete-forecastle.yaml
{{- if eq .spec.distribution.modules.monitoring.type "none" }}
@@ -568,13 +589,16 @@ deleteNginx() {
cp delete-nginx-filtered.yaml delete-nginx.yaml
cat delete-dual-nginx.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-dual-nginx-filtered.yaml
cp delete-dual-nginx-filtered.yaml delete-dual-nginx.yaml
- cat delete-external-dns.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-filtered.yaml
- cp delete-external-dns-filtered.yaml delete-external-dns.yaml
+ cat delete-external-dns-public.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-public-filtered.yaml
+ cp delete-external-dns-public-filtered.yaml delete-external-dns-public.yaml
+ cat delete-external-dns-private.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-private-filtered.yaml
+ cp delete-external-dns-private-filtered.yaml delete-external-dns-private.yaml
cat delete-forecastle.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-forecastle-filtered.yaml
cp delete-forecastle-filtered.yaml delete-forecastle.yaml
fi
{{- end }}
- $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns.yaml
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns-public.yaml
+ $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns-private.yaml
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-forecastle.yaml
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-dual-nginx.yaml
$kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-nginx.yaml
@@ -696,6 +720,13 @@ echo "Finished clean up tasks for migrating Auth type from SSO to basicAuth."
{{- end }}
{{- end }}
+{{- if eq .reducers.distributionModulesAuthProviderType.from "none" }}
+ {{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }}
+ # we need to delete infra ingresses that are present in each namespace before switching to SSO, because they will be recreated in the pomerium namespace.
+ deleteInfraIngresses
+ {{- end }}
+{{- end }}
+
{{- if eq .reducers.distributionModulesAuthProviderType.from "basicAuth" }}
{{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }}
echo "Running clean up tasks for migrating Auth type from basicAuth to SSO..."
diff --git a/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl b/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl
index ce574ac1b..c03abb66b 100644
--- a/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl
+++ b/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl
@@ -32,6 +32,7 @@ cluster_service_ipv4_cidr = null
cluster_service_ipv4_cidr = {{ .spec.kubernetes.serviceIpV4Cidr | quote }}
{{- end }}
node_pools_launch_kind = {{ .spec.kubernetes.nodePoolsLaunchKind | quote }}
+node_pools_global_ami_type = {{ .spec.kubernetes.nodePoolGlobalAmiType | quote }}
{{- if hasKeyAny .spec.kubernetes "logRetentionDays" }}
cluster_log_retention_days = {{ .spec.kubernetes.logRetentionDays }}
@@ -97,7 +98,11 @@ workers_iam_role_name_prefix_override = {{ .spec.kubernetes.workersIAMRoleNamePr
{{- end}}
{{- if hasKeyAny $np "ami" }}
- {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_id" $np.ami.id "ami_owners" (list $np.ami.owner)) }}
+ {{- if and (eq $np.type "self-managed") (hasKeyAny $np.ami "id") (not (hasKeyAny $np.ami "type")) }}
+ {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_id" $np.ami.id "ami_owners" (list $np.ami.owner)) }}
+ {{- else if and (hasKeyAny $np.ami "type") (not (hasKeyAny $np.ami "id")) }}
+ {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_type" $np.ami.type) }}
+ {{- end }}
{{- end }}
{{- if hasKeyAny $np.instance "spot" }}
diff --git a/templates/kubernetes/ekscluster/terraform/main.tf.tpl b/templates/kubernetes/ekscluster/terraform/main.tf.tpl
index 5932b4169..615ca5b50 100644
--- a/templates/kubernetes/ekscluster/terraform/main.tf.tpl
+++ b/templates/kubernetes/ekscluster/terraform/main.tf.tpl
@@ -62,6 +62,7 @@ module "fury" {
ssh_public_key = var.ssh_public_key
node_pools = var.node_pools
node_pools_launch_kind = var.node_pools_launch_kind
+ node_pools_global_ami_type = var.node_pools_global_ami_type
tags = var.tags
cluster_iam_role_name = var.cluster_iam_role_name_prefix_override
workers_role_name = var.workers_iam_role_name_prefix_override
diff --git a/templates/kubernetes/ekscluster/terraform/variables.tf b/templates/kubernetes/ekscluster/terraform/variables.tf
index 30dc3547c..993e88ef0 100644
--- a/templates/kubernetes/ekscluster/terraform/variables.tf
+++ b/templates/kubernetes/ekscluster/terraform/variables.tf
@@ -63,19 +63,21 @@ variable "ssh_public_key" {
variable "node_pools" {
description = "An object list defining node pools configurations"
type = list(object({
- name = string
type = optional(string, "self-managed") # "eks-managed" or "self-managed"
+ name = string
ami_id = optional(string)
- version = optional(string) # null to use cluster_version
+ ami_owners = optional(list(string), ["amazon"])
+ ami_type = optional(string, null)
+ version = optional(string, null) # null to use cluster_version
min_size = number
max_size = number
instance_type = string
- container_runtime = optional(string)
- spot_instance = optional(bool)
- max_pods = optional(number) # null to use default upstream configuration
+ container_runtime = optional(string, "containerd")
+ spot_instance = optional(bool, false)
+ max_pods = optional(number, null) # null to use default upstream configuration
volume_size = optional(number, 100)
volume_type = optional(string, "gp2")
- subnets = optional(list(string)) # null to use default upstream configuration
+ subnets = optional(list(string), null) # null to use default upstream configuration
labels = optional(map(string))
taints = optional(list(string))
tags = optional(map(string))
@@ -219,3 +221,13 @@ variable "workers_iam_role_name_prefix_override" {
type = string
default = ""
}
+
+variable "node_pools_global_ami_type" {
+ type = string
+ description = "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool."
+ default = "alinux2"
+ validation {
+ condition = contains(["alinux2", "alinux2023"], var.node_pools_global_ami_type)
+ error_message = "The global AMI type must be either 'alinux2' or 'alinux2023'."
+ }
+}
\ No newline at end of file
diff --git a/tests/e2e-kfddistribution-upgrades.sh b/tests/e2e-kfddistribution-upgrades.sh
index 93eac8f20..543772664 100755
--- a/tests/e2e-kfddistribution-upgrades.sh
+++ b/tests/e2e-kfddistribution-upgrades.sh
@@ -6,21 +6,12 @@
set -e
echo "----------------------------------------------------------------------------"
-echo "Executing furyctl for the initial setup"
-/tmp/furyctl apply --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml --outdir "$PWD" --disable-analytics
+echo "Executing furyctl for the initial setup 1.29.4"
+/tmp/furyctl apply --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml --outdir "$PWD" --disable-analytics
echo "----------------------------------------------------------------------------"
-echo "Executing upgrade to an intermediate version"
-/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml --outdir "$PWD" --force upgrades --disable-analytics
-
-echo "----------------------------------------------------------------------------"
-echo "Executing upgrade to the next version"
-/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml --outdir "$PWD" --force upgrades --disable-analytics
-
-echo "----------------------------------------------------------------------------"
-echo "Executing upgrade to the next version"
-/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml --outdir "$PWD" --force upgrades --disable-analytics
-
-echo "----------------------------------------------------------------------------"
-echo "Executing upgrade to the latest version"
-/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics
+echo "Executing upgrade to 1.30.0"
+# we set the switch date for Loki to "tomorrow". Note that the `-d` flag does not work on Darwin (macOS); use `-v +1d` there instead.
+# this is needed only when upgrading from 1.29.4 to 1.30.0 (and equivalent versions)
+yq -i ".spec.distribution.modules.logging.loki.tsdbStartDate=\"$(date -I -d '+1 day')\"" tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml
+/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics
diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml
deleted file mode 100644
index 3370d37e9..000000000
--- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
----
-apiVersion: kfd.sighup.io/v1alpha2
-kind: KFDDistribution
-metadata:
- name: sighup
-spec:
- distributionVersion: v1.29.0
- # This section describes how the KFD distribution will be installed
- distribution:
- kubeconfig: "{env://KUBECONFIG}"
- # This common configuration will be applied to all the packages that will be installed in the cluster
- common: {}
- # This section contains all the configurations for all the KFD core modules
- modules:
- networking:
- type: calico
- # This section contains all the configurations for the ingress module
- ingress:
- baseDomain: fury.sighup.cc
- nginx:
- type: single
- tls:
- provider: certManager
- certManager:
- clusterIssuer:
- name: letsencrypt-fury
- email: sighup@sighup.cc
- type: http01
- logging:
- type: loki
- minio:
- storageSize: 20Gi
- rootUser:
- username: sighup
- password: secretpassword1
- monitoring:
- type: prometheus
- prometheus:
- resources:
- requests:
- cpu: 10m
- limits:
- cpu: 2000m
- memory: 6Gi
- tracing:
- type: none
- policy:
- type: kyverno
- kyverno:
- additionalExcludedNamespaces: ["local-path-storage"]
- validationFailureAction: Enforce
- installDefaultPolicies: true
- dr:
- type: on-premises
- velero: {}
- auth:
- provider:
- type: basicAuth
- basicAuth:
- username: test
- password: testpassword
- # patches for kind compatibility and resource setting
- customPatches:
- patchesStrategicMerge:
- - |
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: minio-logging
- namespace: logging
- spec:
- template:
- spec:
- containers:
- - name: minio
- resources:
- requests:
- cpu: 10m
- memory: 50Mi
- - |
- $patch: delete
- apiVersion: logging-extensions.banzaicloud.io/v1alpha1
- kind: HostTailer
- metadata:
- name: systemd-common
- namespace: logging
- - |
- $patch: delete
- apiVersion: logging-extensions.banzaicloud.io/v1alpha1
- kind: HostTailer
- metadata:
- name: systemd-etcd
- namespace: logging
- - |
- $patch: delete
- apiVersion: apps/v1
- kind: DaemonSet
- metadata:
- name: x509-certificate-exporter-control-plane
- namespace: monitoring
diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml
deleted file mode 100644
index f8e198b63..000000000
--- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
----
-apiVersion: kfd.sighup.io/v1alpha2
-kind: KFDDistribution
-metadata:
- name: sighup
-spec:
- distributionVersion: v1.29.1
- # This section describes how the KFD distribution will be installed
- distribution:
- kubeconfig: "{env://KUBECONFIG}"
- # This common configuration will be applied to all the packages that will be installed in the cluster
- common: {}
- # This section contains all the configurations for all the KFD core modules
- modules:
- networking:
- type: calico
- # This section contains all the configurations for the ingress module
- ingress:
- baseDomain: fury.sighup.cc
- nginx:
- type: single
- tls:
- provider: certManager
- certManager:
- clusterIssuer:
- name: letsencrypt-fury
- email: sighup@sighup.cc
- type: http01
- logging:
- type: loki
- minio:
- storageSize: 20Gi
- rootUser:
- username: sighup
- password: secretpassword1
- monitoring:
- type: prometheus
- prometheus:
- resources:
- requests:
- cpu: 10m
- limits:
- cpu: 2000m
- memory: 6Gi
- tracing:
- type: none
- policy:
- type: kyverno
- kyverno:
- additionalExcludedNamespaces: ["local-path-storage"]
- validationFailureAction: Enforce
- installDefaultPolicies: true
- dr:
- type: on-premises
- velero: {}
- auth:
- provider:
- type: basicAuth
- basicAuth:
- username: test
- password: testpassword
- # patches for kind compatibility and resource setting
- customPatches:
- patchesStrategicMerge:
- - |
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: minio-logging
- namespace: logging
- spec:
- template:
- spec:
- containers:
- - name: minio
- resources:
- requests:
- cpu: 10m
- memory: 50Mi
- - |
- $patch: delete
- apiVersion: logging-extensions.banzaicloud.io/v1alpha1
- kind: HostTailer
- metadata:
- name: systemd-common
- namespace: logging
- - |
- $patch: delete
- apiVersion: logging-extensions.banzaicloud.io/v1alpha1
- kind: HostTailer
- metadata:
- name: systemd-etcd
- namespace: logging
- - |
- $patch: delete
- apiVersion: apps/v1
- kind: DaemonSet
- metadata:
- name: x509-certificate-exporter-control-plane
- namespace: monitoring
diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml
deleted file mode 100644
index ce58ffd8e..000000000
--- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
----
-apiVersion: kfd.sighup.io/v1alpha2
-kind: KFDDistribution
-metadata:
- name: sighup
-spec:
- distributionVersion: v1.29.2
- # This section describes how the KFD distribution will be installed
- distribution:
- kubeconfig: "{env://KUBECONFIG}"
- # This common configuration will be applied to all the packages that will be installed in the cluster
- common: {}
- # This section contains all the configurations for all the KFD core modules
- modules:
- networking:
- type: calico
- # This section contains all the configurations for the ingress module
- ingress:
- baseDomain: fury.sighup.cc
- nginx:
- type: single
- tls:
- provider: certManager
- certManager:
- clusterIssuer:
- name: letsencrypt-fury
- email: sighup@sighup.cc
- type: http01
- logging:
- type: loki
- minio:
- storageSize: 20Gi
- rootUser:
- username: sighup
- password: secretpassword1
- monitoring:
- type: prometheus
- prometheus:
- resources:
- requests:
- cpu: 10m
- limits:
- cpu: 2000m
- memory: 6Gi
- tracing:
- type: none
- policy:
- type: kyverno
- kyverno:
- additionalExcludedNamespaces: ["local-path-storage"]
- validationFailureAction: Enforce
- installDefaultPolicies: true
- dr:
- type: on-premises
- velero: {}
- auth:
- provider:
- type: basicAuth
- basicAuth:
- username: test
- password: testpassword
- # patches for kind compatibility and resource setting
- customPatches:
- patchesStrategicMerge:
- - |
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: minio-logging
- namespace: logging
- spec:
- template:
- spec:
- containers:
- - name: minio
- resources:
- requests:
- cpu: 10m
- memory: 50Mi
- - |
- $patch: delete
- apiVersion: logging-extensions.banzaicloud.io/v1alpha1
- kind: HostTailer
- metadata:
- name: systemd-common
- namespace: logging
- - |
- $patch: delete
- apiVersion: logging-extensions.banzaicloud.io/v1alpha1
- kind: HostTailer
- metadata:
- name: systemd-etcd
- namespace: logging
- - |
- $patch: delete
- apiVersion: apps/v1
- kind: DaemonSet
- metadata:
- name: x509-certificate-exporter-control-plane
- namespace: monitoring
diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml
index d261e88cf..43ef4e72d 100644
--- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml
+++ b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml
@@ -52,7 +52,7 @@ spec:
type: kyverno
kyverno:
additionalExcludedNamespaces: ["local-path-storage"]
- validationFailureAction: Enforce
+ validationFailureAction: enforce
installDefaultPolicies: true
dr:
type: on-premises
diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml
similarity index 95%
rename from tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml
rename to tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml
index 682a19275..c9a4de25b 100644
--- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml
+++ b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.3
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -32,6 +32,8 @@ spec:
type: http01
logging:
type: loki
+ loki:
+ tsdbStartDate: "2024-11-28" # this should be a day in the future when upgrading
minio:
storageSize: 20Gi
rootUser:
diff --git a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml
index a30f86d37..3696821c0 100644
--- a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml
+++ b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,9 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
backend: minio
+ tsdbStartDate: "2024-11-21"
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +42,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +58,7 @@ spec:
password: secretpassword2
tracing:
type: tempo
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +73,7 @@ spec:
validationFailureAction: Enforce
dr:
type: on-premises
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml
index 922a40ed2..61f87e7a3 100644
--- a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml
+++ b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,9 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
backend: minio
+ tsdbStartDate: "2024-11-21"
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +42,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +58,7 @@ spec:
password: secretpassword2
tracing:
type: tempo
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +73,7 @@ spec:
validationFailureAction: Enforce
dr:
type: on-premises
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml
index 775e53d79..ccf9c5f75 100644
--- a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml
+++ b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,9 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
backend: minio
+ tsdbStartDate: "2024-11-21"
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +42,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -55,11 +56,11 @@ spec:
rootUser:
username: sighup
password: secretpassword2
- alertmanager:
+ alertmanager:
installDefaultRules: false
tracing:
type: tempo
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -74,7 +75,7 @@ spec:
validationFailureAction: Enforce
dr:
type: on-premises
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml
index 021b2dfbf..fc5b82ac8 100644
--- a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,9 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
backend: minio
+ tsdbStartDate: "2024-11-21"
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +42,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +58,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +73,7 @@ spec:
validationFailureAction: Enforce
dr:
type: on-premises
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml
index 3bf20f34b..b7af467b7 100644
--- a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,9 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
backend: minio
+ tsdbStartDate: "2024-11-21"
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +42,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +58,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +73,7 @@ spec:
validationFailureAction: Enforce
dr:
type: on-premises
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml
index 1a52d03a0..68355ca88 100644
--- a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,9 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
backend: minio
+ tsdbStartDate: "2024-11-21"
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +42,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +58,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +73,7 @@ spec:
validationFailureAction: Enforce
dr:
type: none
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml
index ffe270e96..e4498dd78 100644
--- a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,6 @@ spec:
type: http01
logging:
type: none
- loki:
- backend: minio
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +39,7 @@ spec:
password: secretpassword1
monitoring:
type: mimir
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +55,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +70,7 @@ spec:
validationFailureAction: Enforce
dr:
type: none
- velero:
+ velero:
backend: minio
auth:
provider:
diff --git a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml
index 0b4d82ec3..73d273bcc 100644
--- a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -32,8 +32,6 @@ spec:
type: http01
logging:
type: none
- loki:
- backend: minio
minio:
storageSize: 20Gi
rootUser:
@@ -82,7 +80,8 @@ spec:
password: testpassword
# patches for kind compatibility and resource setting
customPatches:
- patchesStrategicMerge: []
+ patchesStrategicMerge:
+ []
#- |
# apiVersion: apps/v1
# kind: StatefulSet
diff --git a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml
index ddd3770f0..97103487f 100644
--- a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml
+++ b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,6 @@ spec:
type: http01
logging:
type: none
- loki:
- backend: minio
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +39,7 @@ spec:
password: secretpassword1
monitoring:
type: none
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +55,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,7 +70,7 @@ spec:
validationFailureAction: Enforce
dr:
type: none
- velero:
+ velero:
backend: minio
auth:
provider:
@@ -85,8 +83,8 @@ spec:
SHARED_SECRET: "LEjtmaKtiCB2qA5rtFSHWiWAzkdFftADf/q2xWT64dg="
SIGNING_KEY: "LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU9DMHBBQmx4ZS84bjRQcHBBVUE1QnRxam96Z3dDZVpvRDI2c056TGRiS1hvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFRUpDc253MHlXODRLZXhVSjQ5M21MMG9tNFN5dzJBeGtWOGFpRkxDZFdKaVBYamtUMDE1QwowclJsV2tqNVdlQUhqYmVncmRNL2QyejZTbzY3MWs3TVpRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo="
baseDomain: fury.sighup.cc
-
- dex:
+
+ dex:
connectors:
- type: ldap
id: ldap
@@ -112,7 +110,8 @@ spec:
nameAttr: cn
# patches for kind compatibility and resource setting
customPatches:
- patchesStrategicMerge: []
+ patchesStrategicMerge:
+ []
#- |
# apiVersion: apps/v1
# kind: StatefulSet
@@ -180,6 +179,6 @@ spec:
# name: x509-certificate-exporter-control-plane
# namespace: monitoring
plugins:
- kustomize:
+ kustomize:
- name: ldap-server
folder: ./plugins/ldap-server
diff --git a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml
index 2a195558b..700430afa 100644
--- a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,6 @@ spec:
type: http01
logging:
type: none
- loki:
- backend: minio
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +39,7 @@ spec:
password: secretpassword1
monitoring:
type: none
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +55,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,14 +70,15 @@ spec:
validationFailureAction: Enforce
dr:
type: none
- velero:
+ velero:
backend: minio
auth:
provider:
type: none
# patches for kind compatibility and resource setting
customPatches:
- patchesStrategicMerge: []
+ patchesStrategicMerge:
+ []
#- |
# apiVersion: apps/v1
# kind: StatefulSet
@@ -147,6 +146,6 @@ spec:
# name: x509-certificate-exporter-control-plane
# namespace: monitoring
plugins:
- kustomize:
+ kustomize:
- name: ldap-server
folder: ./plugins/ldap-server
diff --git a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml
index 9e0f9d0ff..21eb481ef 100644
--- a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml
+++ b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,8 +32,6 @@ spec:
type: http01
logging:
type: none
- loki:
- backend: minio
minio:
storageSize: 20Gi
rootUser:
@@ -41,7 +39,7 @@ spec:
password: secretpassword1
monitoring:
type: none
- mimir:
+ mimir:
backend: minio
prometheus:
resources:
@@ -57,7 +55,7 @@ spec:
password: secretpassword2
tracing:
type: none
- tempo:
+ tempo:
backend: minio
minio:
storageSize: 20Gi
@@ -72,14 +70,15 @@ spec:
validationFailureAction: Enforce
dr:
type: none
- velero:
+ velero:
backend: minio
auth:
provider:
type: none
# patches for kind compatibility and resource setting
customPatches:
- patchesStrategicMerge: []
+ patchesStrategicMerge:
+ []
#- |
# apiVersion: apps/v1
# kind: StatefulSet
diff --git a/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml b/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml
index 2dcca9681..f6793804f 100644
--- a/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml
+++ b/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
diff --git a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml
index 34af41579..d31aced8a 100644
--- a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml
+++ b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -37,6 +37,9 @@ spec:
rootUser:
username: sighup
password: secretpassword1
+ loki:
+ backend: minio
+ tsdbStartDate: "2024-11-21"
monitoring:
type: mimir
prometheus:
diff --git a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml
index 7bb8cc590..d69e98465 100644
--- a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml
+++ b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml
@@ -8,7 +8,7 @@ kind: KFDDistribution
metadata:
name: sighup
spec:
- distributionVersion: v1.29.2
+ distributionVersion: v1.30.0
# This section describes how the KFD distribution will be installed
distribution:
kubeconfig: "{env://KUBECONFIG}"
@@ -16,7 +16,7 @@ spec:
common: {}
# This section contains all the configurations for all the KFD core modules
modules:
- networking:
+ networking:
type: calico
# This section contains all the configurations for the ingress module
ingress:
@@ -32,7 +32,8 @@ spec:
type: http01
logging:
type: loki
- loki:
+ loki:
+ tsdbStartDate: "2024-11-21"
backend: externalEndpoint
externalEndpoint:
endpoint: 192.168.1.100:9000
@@ -42,7 +43,7 @@ spec:
bucketName: loki
monitoring:
type: mimir
- mimir:
+ mimir:
backend: externalEndpoint
externalEndpoint:
endpoint: 192.168.1.100:9000
@@ -52,7 +53,7 @@ spec:
bucketName: mimir
tracing:
type: tempo
- tempo:
+ tempo:
backend: externalEndpoint
externalEndpoint:
endpoint: 192.168.1.100:9000
@@ -68,7 +69,7 @@ spec:
validationFailureAction: Enforce
dr:
type: on-premises
- velero:
+ velero:
backend: externalEndpoint
externalEndpoint:
endpoint: 192.168.1.100:9000
@@ -116,4 +117,4 @@ spec:
kind: DaemonSet
metadata:
name: x509-certificate-exporter-control-plane
- namespace: monitoring
\ No newline at end of file
+ namespace: monitoring
diff --git a/tests/schema.sh b/tests/schema.sh
index c1320c506..06f133371 100755
--- a/tests/schema.sh
+++ b/tests/schema.sh
@@ -29,7 +29,7 @@ test_schema() {
yq "tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.yaml" -o json > "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json"
validate() {
- jv "schemas/${KIND}/${APIVER}.json" "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json"
+ jv "schemas/${KIND}/${APIVER}.json" "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" 2>&1
}
run validate
@@ -63,8 +63,8 @@ test_schema() {
expect() {
expect_no "${1}"
- local EXPECTED_ERROR_1="[S#/\$defs/Spec/else/properties/kubernetes/properties/vpcId/type] expected null, but got string"
- local EXPECTED_ERROR_2="[S#/\$defs/Spec/else/properties/kubernetes/properties/subnetIds/type] expected null, but got array"
+ local EXPECTED_ERROR_1="at '/spec/kubernetes/vpcId': got string, want null"
+ local EXPECTED_ERROR_2="at '/spec/kubernetes/subnetIds': got array, want null"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -92,7 +92,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec/then/properties/kubernetes/required] missing properties: 'vpcId', 'subnetIds'"
+ local EXPECTED_ERROR_1="at '/spec/kubernetes': missing properties 'vpcId', 'subnetIds'"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -116,8 +116,8 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/0/else/properties/dex/type] expected null, but got object"
- local EXPECTED_ERROR_2="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/0/else/properties/pomerium/type] expected null, but got object"
+ local EXPECTED_ERROR_1="at '/spec/distribution/modules/auth/dex': got object, want null"
+ local EXPECTED_ERROR_2="at '/spec/distribution/modules/auth/pomerium': got object, want null"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -145,7 +145,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/1/then/properties/provider/required] missing properties: 'basicAuth'"
+ local EXPECTED_ERROR_1="at '/spec/distribution/modules/auth/provider': missing property 'basicAuth'"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -169,7 +169,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution/else/properties/modules/properties/aws/type] expected null, but got object"
+ local EXPECTED_ERROR_1="at '/spec/distribution/modules/aws': got object, want null"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -193,8 +193,8 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS/then/required] missing properties: 'secret'"
- local EXPECTED_ERROR_2="[S#/\$defs/Spec.Distribution/then/properties/modules/required] missing properties: 'aws'"
+ local EXPECTED_ERROR_1="at '/spec/distribution/modules/ingress/nginx/tls': missing property 'secret'"
+ local EXPECTED_ERROR_2="at '/spec/distribution/modules': missing property 'aws'"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -222,7 +222,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution/then/properties/modules/required] missing properties: 'aws'"
+ local EXPECTED_ERROR_1="at '/spec/distribution/modules': missing property 'aws'"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -246,7 +246,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.CustomPatches.Patch/oneOf] valid against schemas at indexes 0 and 1"
+ local EXPECTED_ERROR_1="at '/spec/distribution/customPatches/patches/0': oneOf failed, subschemas 0, 1 matched"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -270,7 +270,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="additionalProperties 'type' not allowed"
+ local EXPECTED_ERROR_1="at '/spec/distribution/customPatches/configMapGenerator/0': additional properties 'type' not allowed"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -294,7 +294,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="expected null, but got string"
+ local EXPECTED_ERROR_1="at '/spec/infrastructure/vpn/vpcId': got string, want null"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
@@ -318,7 +318,7 @@ test_schema() {
expect() {
expect_no
- local EXPECTED_ERROR_1="missing properties: 'vpcId'"
+ local EXPECTED_ERROR_1=" at '/spec/infrastructure/vpn': missing property 'vpcId'"
if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then
return 2
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml
index a77ee5ed8..562c66dc4 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml
@@ -52,10 +52,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml
index 54c739aa1..6242d3fc3 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml
index b283a5734..4ae4d5a2b 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml
@@ -19,10 +19,12 @@ spec:
kubernetes:
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml
index ce0c069b7..a441afdec 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml
@@ -29,10 +29,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml
index 883048d06..d1421afba 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml
index 4e1677078..5629057b5 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml
index 1dad8e9dd..aa75e4853 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml
index 0738fbbb2..4f0950e57 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml
index d78345fae..f770659ef 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml
index a121a534d..afdfeb7d1 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml
index d8ffae0ee..f1b942728 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml
index cf50e7bab..7d0d4e962 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml
index f59c5b000..ff0ed51dd 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml
index b96b1925f..3c098542f 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml
index a92184088..d01de7030 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml
index fa26569fd..9cbd6ec62 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml
@@ -44,10 +44,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml
index 4516f01a3..f857d991c 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml
index fa26569fd..9cbd6ec62 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml
@@ -44,10 +44,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml
index 484b9722c..1b35fe071 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml
index 0844c8324..192ec94da 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml
@@ -44,10 +44,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml
index 7caeeac88..724b2849c 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml
@@ -37,10 +37,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml
index 3224f62f8..ebc3c17c8 100644
--- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml
@@ -38,10 +38,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml
new file mode 100644
index 000000000..8aab91a86
--- /dev/null
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml
@@ -0,0 +1,135 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Tests the following cases:
+
+# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled
+# When I validate the config against the schema
+# Then an error "$ref/properties/nodePools/items/$ref/then/properties/ami/properties/id/type: expected null, but got string" is returned
+
+---
+apiVersion: kfd.sighup.io/v1alpha2
+kind: EKSCluster
+metadata:
+ name: furyctl-dev-aws-al
+spec:
+ infrastructure:
+ vpn:
+ ssh:
+ allowedFromCidrs:
+ - 0.0.0.0/0
+ githubUsersName:
+ - jnardiello
+ publicKeys:
+ - ssh-ed25519 SomethingSomething engineering@sighup.io
+ vpnClientsSubnetCidr: 192.168.200.0/24
+ kubernetes:
+ apiServer:
+ privateAccess: true
+ privateAccessCidrs: ["10.0.0.3/16"]
+ publicAccessCidrs: []
+ publicAccess: false
+ vpcId: vpc-0123456789abcdef0
+ subnetIds:
+ - subnet-0123456789abcdef0
+ - subnet-0123456789abcdef1
+ - subnet-0123456789abcdef2
+ nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
+ nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
+ nodePools:
+ - ami:
+ id: ami-01234567890123456
+ owner: "123456789012"
+ type: eks-managed
+ instance:
+ type: t3.large
+ name: worker-eks
+ size:
+ max: 3
+ min: 2
+ distribution:
+ customPatches:
+ configMapGenerator:
+ - name: a-configmap
+ files:
+ - /path/to/config.example
+ - name: b-configmap
+ envs:
+ - /path/to/envs.env
+ patches:
+ - target:
+ group: ""
+ version: v1
+ kind: Service
+ name: cluster-autoscaler
+ namespace: kube-system
+ path: /path/to/patch.yaml
+ patchesStrategicMerge:
+ - |
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ label1: value1
+ name: cluster-autoscaler
+ namespace: kube-system
+ secretGenerator:
+ - name: a-secret
+ files:
+ - /path/to/config.example
+ - name: b-secret
+ envs:
+ - /path/to/envs.env
+ common:
+ provider:
+ type: eks
+ modules:
+ aws: {}
+ dr:
+ type: eks
+ velero:
+ eks:
+ bucketName: example-velero
+ region: eu-west-1
+ ingress:
+ baseDomain: furyctl-demo.sighup.io
+ dns:
+ private:
+ create: true
+ name: internal.furyctl-demo.sighup.io
+ public:
+ create: true
+ name: furyctl-demo.sighup.io
+ nginx:
+ type: single
+ tls:
+ provider: secret
+ secret:
+ ca: |
+ value
+ cert: |
+ value
+ key: |
+ value
+ logging:
+ type: opensearch
+ opensearch:
+ type: single
+ policy:
+ type: gatekeeper
+ gatekeeper:
+ additionalExcludedNamespaces: []
+ installDefaultPolicies: true
+ enforcementAction: deny
+ distributionVersion: v1.24.1
+ region: eu-west-1
+ toolsConfiguration:
+ terraform:
+ state:
+ s3:
+ bucketName: furyctl-test-eks
+ keyPrefix: furyctl-test
+ region: eu-west-1
diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml
new file mode 100644
index 000000000..f5787979d
--- /dev/null
+++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml
@@ -0,0 +1,157 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Tests the following cases:
+
+# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is not filled
+# When I validate the config against the schema
+# Then no errors are returned
+
+---
+apiVersion: kfd.sighup.io/v1alpha2
+kind: EKSCluster
+metadata:
+ name: furyctl-dev-aws-al
+spec:
+ infrastructure:
+ vpn:
+ vpcId: vpc-0123456789abcdef0
+ ssh:
+ allowedFromCidrs:
+ - 0.0.0.0/0
+ githubUsersName:
+ - jnardiello
+ publicKeys:
+ - ssh-ed25519 SomethingSomething engineering@sighup.io
+ vpnClientsSubnetCidr: 192.168.200.0/24
+ kubernetes:
+ apiServer:
+ privateAccess: true
+ privateAccessCidrs: ["10.0.0.3/16"]
+ publicAccessCidrs: []
+ publicAccess: false
+ vpcId: vpc-0123456789abcdef0
+ subnetIds:
+ - subnet-0123456789abcdef0
+ - subnet-0123456789abcdef1
+ - subnet-0123456789abcdef2
+ nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
+ nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
+ nodePools:
+  - type: eks-managed
+    # NOTE: the ami block is intentionally omitted. This fixture asserts
+    # that an eks-managed node pool WITHOUT ami.id passes validation; the
+    # failing counterpart (012-no.yaml) fills ami.id and must error.
+ instance:
+ type: t3.large
+ name: worker-eks
+ size:
+ max: 3
+ min: 2
+ distribution:
+ customPatches:
+ configMapGenerator:
+ - name: a-configmap
+ files:
+ - /path/to/config.example
+ - name: b-configmap
+ envs:
+ - /path/to/envs.env
+ patches:
+ - target:
+ group: ""
+ version: v1
+ kind: Service
+ name: cluster-autoscaler
+ namespace: kube-system
+ path: /path/to/patch.yaml
+ patchesStrategicMerge:
+ - |
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ label1: value1
+ name: cluster-autoscaler
+ namespace: kube-system
+ secretGenerator:
+ - name: a-secret
+ files:
+ - /path/to/config.example
+ - name: b-secret
+ envs:
+ - /path/to/envs.env
+ common:
+ provider:
+ type: eks
+ modules:
+ aws:
+ clusterAutoscaler:
+ iamRoleArn: arn:aws:iam::123456789012:role/cluster-autoscaler
+ ebsCsiDriver:
+ iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver
+ loadBalancerController:
+ iamRoleArn: arn:aws:iam::123456789012:role/load-balancer-controller
+ overrides: {}
+ dr:
+ type: eks
+ velero:
+ eks:
+ bucketName: example-velero
+ region: eu-west-1
+ iamRoleArn: arn:aws:iam::123456789012:role/velero
+ ingress:
+ baseDomain: furyctl-demo.sighup.io
+ dns:
+ private:
+ create: true
+ name: internal.furyctl-demo.sighup.io
+ vpcId: vpc-12345678901234567
+ public:
+ create: true
+ name: furyctl-demo.sighup.io
+ nginx:
+ type: single
+ tls:
+ provider: secret
+ secret:
+ ca: |
+ value
+ cert: |
+ value
+ key: |
+ value
+ certManager:
+ clusterIssuer:
+ name: letsencrypt-fury
+ email: email@test.it
+ type: http01
+ route53:
+ region: eu-west-1
+ hostedZoneId: Z1234567890
+ iamRoleArn: arn:aws:iam::123456789012:role/cert-manager
+ externalDns:
+ privateIamRoleArn: arn:aws:iam::123456789012:role/external-dns-private
+ publicIamRoleArn: arn:aws:iam::123456789012:role/external-dns-public
+ logging:
+ type: opensearch
+ opensearch:
+ type: single
+ policy:
+ type: gatekeeper
+ gatekeeper:
+ additionalExcludedNamespaces: []
+ installDefaultPolicies: true
+ enforcementAction: deny
+ distributionVersion: v1.24.1
+ region: eu-west-1
+ toolsConfiguration:
+ terraform:
+ state:
+ s3:
+ bucketName: furyctl-test-eks
+ keyPrefix: furyctl-test
+ region: eu-west-1
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml
index 4f152ad42..5593b9eea 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml
@@ -52,10 +52,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml
index ef7e71b3a..6dc006a1e 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml
index ab277b1db..87c241192 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml
@@ -19,10 +19,12 @@ spec:
kubernetes:
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml
index 44f8f4a7e..d0488d417 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml
@@ -29,10 +29,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml
index eca2f32cd..6571a4e9e 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml
index 2b634eb7d..3676cf950 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml
index 46d733beb..c9015eed3 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml
index e3c0320bc..f532e7dd4 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml
index 703123b34..b1cb81933 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml
index 033761852..b17f26041 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml
index 3c0c8e6dc..59ed50619 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml
@@ -46,10 +46,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml
index 55f04cbab..aadc3b4ba 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml
index f59c5b000..ff0ed51dd 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml
index 79e7d86cb..7c7798b76 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml
index 7ba29188c..5b5bb45f6 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml
index 660e03ea2..4376557a4 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml
@@ -44,10 +44,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml
index bf088d77c..7bc5e19aa 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml
index b99c536b6..242de8a65 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml
@@ -44,10 +44,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml
index a553e542a..29cf21dbc 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml
@@ -45,10 +45,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml
index 8e2e4f7d0..ac7701cfe 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml
@@ -44,10 +44,12 @@ spec:
publicAccess: false
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml
index 7caeeac88..724b2849c 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml
@@ -37,10 +37,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml
index 2bbc729bd..0c0d66952 100644
--- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml
@@ -38,10 +38,12 @@ spec:
- subnet-0123456789abcdef2
nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
nodePools:
- ami:
id: ami-01234567890123456
owner: "123456789012"
+ type: self-managed
instance:
type: t3.large
name: worker-eks
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml
new file mode 100644
index 000000000..8aab91a86
--- /dev/null
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml
@@ -0,0 +1,135 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Tests the following cases:
+
+# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled
+# When I validate the config against the schema
+# Then an error "$ref/properties/nodePools/items/$ref/then/properties/ami/properties/id/type: expected null, but got string" is returned
+
+---
+apiVersion: kfd.sighup.io/v1alpha2
+kind: EKSCluster
+metadata:
+ name: furyctl-dev-aws-al
+spec:
+ infrastructure:
+ vpn:
+ ssh:
+ allowedFromCidrs:
+ - 0.0.0.0/0
+ githubUsersName:
+ - jnardiello
+ publicKeys:
+ - ssh-ed25519 SomethingSomething engineering@sighup.io
+ vpnClientsSubnetCidr: 192.168.200.0/24
+ kubernetes:
+ apiServer:
+ privateAccess: true
+ privateAccessCidrs: ["10.0.0.3/16"]
+ publicAccessCidrs: []
+ publicAccess: false
+ vpcId: vpc-0123456789abcdef0
+ subnetIds:
+ - subnet-0123456789abcdef0
+ - subnet-0123456789abcdef1
+ - subnet-0123456789abcdef2
+ nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
+ nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
+ nodePools:
+ - ami:
+ id: ami-01234567890123456
+ owner: "123456789012"
+ type: eks-managed
+ instance:
+ type: t3.large
+ name: worker-eks
+ size:
+ max: 3
+ min: 2
+ distribution:
+ customPatches:
+ configMapGenerator:
+ - name: a-configmap
+ files:
+ - /path/to/config.example
+ - name: b-configmap
+ envs:
+ - /path/to/envs.env
+ patches:
+ - target:
+ group: ""
+ version: v1
+ kind: Service
+ name: cluster-autoscaler
+ namespace: kube-system
+ path: /path/to/patch.yaml
+ patchesStrategicMerge:
+ - |
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ label1: value1
+ name: cluster-autoscaler
+ namespace: kube-system
+ secretGenerator:
+ - name: a-secret
+ files:
+ - /path/to/config.example
+ - name: b-secret
+ envs:
+ - /path/to/envs.env
+ common:
+ provider:
+ type: eks
+ modules:
+ aws: {}
+ dr:
+ type: eks
+ velero:
+ eks:
+ bucketName: example-velero
+ region: eu-west-1
+ ingress:
+ baseDomain: furyctl-demo.sighup.io
+ dns:
+ private:
+ create: true
+ name: internal.furyctl-demo.sighup.io
+ public:
+ create: true
+ name: furyctl-demo.sighup.io
+ nginx:
+ type: single
+ tls:
+ provider: secret
+ secret:
+ ca: |
+ value
+ cert: |
+ value
+ key: |
+ value
+ logging:
+ type: opensearch
+ opensearch:
+ type: single
+ policy:
+ type: gatekeeper
+ gatekeeper:
+ additionalExcludedNamespaces: []
+ installDefaultPolicies: true
+ enforcementAction: deny
+ distributionVersion: v1.24.1
+ region: eu-west-1
+ toolsConfiguration:
+ terraform:
+ state:
+ s3:
+ bucketName: furyctl-test-eks
+ keyPrefix: furyctl-test
+ region: eu-west-1
diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml
new file mode 100644
index 000000000..5b6353984
--- /dev/null
+++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml
@@ -0,0 +1,135 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Tests the following cases:
+
+# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled
+# When I validate the config against the schema
+# Then no errors are returned
+
+---
+apiVersion: kfd.sighup.io/v1alpha2
+kind: EKSCluster
+metadata:
+ name: furyctl-dev-aws-al
+spec:
+ infrastructure:
+ vpn:
+ ssh:
+ allowedFromCidrs:
+ - 0.0.0.0/0
+ githubUsersName:
+ - jnardiello
+ publicKeys:
+ - ssh-ed25519 SomethingSomething engineering@sighup.io
+ vpnClientsSubnetCidr: 192.168.200.0/24
+ kubernetes:
+ apiServer:
+ privateAccess: true
+ privateAccessCidrs: ["10.0.0.3/16"]
+ publicAccessCidrs: []
+ publicAccess: false
+ vpcId: vpc-0123456789abcdef0
+ subnetIds:
+ - subnet-0123456789abcdef0
+ - subnet-0123456789abcdef1
+ - subnet-0123456789abcdef2
+ nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io
+ nodePoolsLaunchKind: both
+ nodePoolGlobalAmiType: "alinux2"
+ nodePools:
+ - ami:
+ id: ami-01234567890123456
+ owner: "123456789012"
+ type: eks-managed
+ instance:
+ type: t3.large
+ name: worker-eks
+ size:
+ max: 3
+ min: 2
+ distribution:
+ customPatches:
+ configMapGenerator:
+ - name: a-configmap
+ files:
+ - /path/to/config.example
+ - name: b-configmap
+ envs:
+ - /path/to/envs.env
+ patches:
+ - target:
+ group: ""
+ version: v1
+ kind: Service
+ name: cluster-autoscaler
+ namespace: kube-system
+ path: /path/to/patch.yaml
+ patchesStrategicMerge:
+ - |
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ label1: value1
+ name: cluster-autoscaler
+ namespace: kube-system
+ secretGenerator:
+ - name: a-secret
+ files:
+ - /path/to/config.example
+ - name: b-secret
+ envs:
+ - /path/to/envs.env
+ common:
+ provider:
+ type: eks
+ modules:
+ aws: {}
+ dr:
+ type: eks
+ velero:
+ eks:
+ bucketName: example-velero
+ region: eu-west-1
+ ingress:
+ baseDomain: furyctl-demo.sighup.io
+ dns:
+ private:
+ create: true
+ name: internal.furyctl-demo.sighup.io
+ public:
+ create: true
+ name: furyctl-demo.sighup.io
+ nginx:
+ type: single
+ tls:
+ provider: secret
+ secret:
+ ca: |
+ value
+ cert: |
+ value
+ key: |
+ value
+ logging:
+ type: opensearch
+ opensearch:
+ type: single
+ policy:
+ type: gatekeeper
+ gatekeeper:
+ additionalExcludedNamespaces: []
+ installDefaultPolicies: true
+ enforcementAction: deny
+ distributionVersion: v1.24.1
+ region: eu-west-1
+ toolsConfiguration:
+ terraform:
+ state:
+ s3:
+ bucketName: furyctl-test-eks
+ keyPrefix: furyctl-test
+ region: eu-west-1