From 274f79e48577ba0cc2cf2454bf62a8a939a6bdad Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Mon, 28 Oct 2024 17:16:14 +0100 Subject: [PATCH 001/160] feat: bump to 1.30 for the OnPremises installer (and bump also EKS for consistency (not tested yet)) --- kfd.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index 576f2bd1c..e6b4d826d 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -2,7 +2,7 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. -version: v1.29.4 +version: v1.30.0 modules: auth: v0.3.0 aws: v4.2.1 @@ -18,8 +18,8 @@ kubernetes: version: 1.29 installer: v3.1.2 onpremises: - version: 1.29.3 - installer: v1.29.3-rev.2 + version: 1.30.5 + installer: v1.30.5-rc.0 furyctlSchemas: eks: - apiVersion: kfd.sighup.io/v1alpha2 @@ -35,7 +35,7 @@ tools: furyagent: version: 0.4.0 kubectl: - version: 1.29.3 + version: 1.30.5 kustomize: version: 3.10.0 terraform: From c6c6d0ffdc9dd18692323c0074eaa57dfb037880 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Tue, 29 Oct 2024 17:03:00 +0100 Subject: [PATCH 002/160] feat: bump version 1.30 with the latest --- kfd.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index e6b4d826d..a5ae1d46e 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -15,11 +15,11 @@ modules: tracing: v1.0.3 kubernetes: eks: - version: 1.29 + version: 1.30 installer: v3.1.2 onpremises: - version: 1.30.5 - installer: v1.30.5-rc.0 + version: 1.30.6 + installer: v1.30.6-rc.0 furyctlSchemas: eks: - apiVersion: kfd.sighup.io/v1alpha2 From 9bd1d6aa8b2f2a1fe61851564e4e28bbf689c8d1 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 31 Oct 2024 15:13:22 +0100 Subject: [PATCH 003/160] Feat: add property spec.distribution.common.networkPoliciesEnabled --- defaults/ekscluster-kfd-v1alpha2.yaml | 2 +- defaults/kfddistribution-kfd-v1alpha2.yaml | 2 +- defaults/onpremises-kfd-v1alpha2.yaml | 2 +- 
docs/schemas/ekscluster-kfd-v1alpha2.md | 44 ++++++++++++------- docs/schemas/kfddistribution-kfd-v1alpha2.md | 44 ++++++++++++------- docs/schemas/onpremises-kfd-v1alpha2.md | 44 ++++++++++++------- .../ekscluster/v1alpha2/private/schema.go | 3 ++ pkg/apis/ekscluster/v1alpha2/public/schema.go | 3 ++ .../kfddistribution/v1alpha2/public/schema.go | 3 ++ pkg/apis/onpremises/v1alpha2/public/schema.go | 3 ++ schemas/private/ekscluster-kfd-v1alpha2.json | 4 ++ schemas/public/ekscluster-kfd-v1alpha2.json | 4 ++ .../public/kfddistribution-kfd-v1alpha2.json | 4 ++ schemas/public/onpremises-kfd-v1alpha2.json | 4 ++ 14 files changed, 118 insertions(+), 48 deletions(-) diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 6c708be00..3cfe3419d 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -15,7 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: eks - + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index d0c790257..7e2f6531d 100644 --- a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -15,7 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: none - + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration diff --git a/defaults/onpremises-kfd-v1alpha2.yaml b/defaults/onpremises-kfd-v1alpha2.yaml index f26ad1e6e..69ca84fc7 100644 --- a/defaults/onpremises-kfd-v1alpha2.yaml +++ b/defaults/onpremises-kfd-v1alpha2.yaml @@ -15,7 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: none - + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and 
configuration modules: # ingress module configuration diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 7521e3c34..1d147f794 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -84,13 +84,20 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ### Properties -| Property | Type | Required | -|:----------------------------------------------------------------|:---------|:---------| -| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | -| [provider](#specdistributioncommonprovider) | `object` | Optional | -| [registry](#specdistributioncommonregistry) | `string` | Optional | -| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | -| [tolerations](#specdistributioncommontolerations) | `array` | Optional | +| Property | Type | Required | +|:------------------------------------------------------------------------|:----------|:---------| +| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional | +| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | +| [provider](#specdistributioncommonprovider) | `object` | Optional | +| [registry](#specdistributioncommonregistry) | `string` | Optional | +| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | +| [tolerations](#specdistributioncommontolerations) | `array` | Optional | + +## .spec.distribution.common.networkPoliciesEnabled + +### Description + +This field defines whether Network Policies are provided for all modules ## .spec.distribution.common.nodeSelector @@ -5474,14 +5481,15 @@ Overrides the default IAM role name prefix for the EKS workers ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| 
[name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5489,6 +5497,12 @@ Overrides the default IAM role name prefix for the EKS workers The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
+ ## .spec.plugins.helm.releases.name ### Description diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 6118a1540..a26f82393 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -76,13 +76,20 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Properties -| Property | Type | Required | -|:----------------------------------------------------------------|:---------|:---------| -| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | -| [provider](#specdistributioncommonprovider) | `object` | Optional | -| [registry](#specdistributioncommonregistry) | `string` | Optional | -| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | -| [tolerations](#specdistributioncommontolerations) | `array` | Optional | +| Property | Type | Required | +|:------------------------------------------------------------------------|:----------|:---------| +| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional | +| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | +| [provider](#specdistributioncommonprovider) | `object` | Optional | +| [registry](#specdistributioncommonregistry) | `string` | Optional | +| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | +| [tolerations](#specdistributioncommontolerations) | `array` | Optional | + +## .spec.distribution.common.networkPoliciesEnabled + +### Description + +This field defines whether Network Policies are provided for all modules ## .spec.distribution.common.nodeSelector @@ -4102,14 +4109,15 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | 
Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -4117,6 +4125,12 @@ The type of tracing to use, either ***none*** or ***tempo*** The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. + ## .spec.plugins.helm.releases.name ### Description diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index f620b0661..c379b5147 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -80,18 +80,25 @@ The name of the cluster. 
It will also be used as a prefix for all the other reso ### Properties -| Property | Type | Required | -|:----------------------------------------------------------------|:---------|:---------| -| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | -| [provider](#specdistributioncommonprovider) | `object` | Optional | -| [registry](#specdistributioncommonregistry) | `string` | Optional | -| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | -| [tolerations](#specdistributioncommontolerations) | `array` | Optional | +| Property | Type | Required | +|:------------------------------------------------------------------------|:----------|:---------| +| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional | +| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | +| [provider](#specdistributioncommonprovider) | `object` | Optional | +| [registry](#specdistributioncommonregistry) | `string` | Optional | +| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | +| [tolerations](#specdistributioncommontolerations) | `array` | Optional | ### Description Common configuration for all the distribution modules. +## .spec.distribution.common.networkPoliciesEnabled + +### Description + +This field defines whether Network Policies are provided for all modules + ## .spec.distribution.common.nodeSelector ### Description @@ -5150,14 +5157,15 @@ The subnet CIDR to use for the Services network. 
### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5165,6 +5173,12 @@ The subnet CIDR to use for the Services network. The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
+ ## .spec.plugins.helm.releases.name ### Description diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 4ea507871..8387312c4 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -89,6 +89,9 @@ type SpecDistributionCommon struct { // The tolerations that will be added to the pods for all the KFD modules Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + + // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". + NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index ff34c16a3..b5f254855 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -89,6 +89,9 @@ type SpecDistributionCommon struct { // The tolerations that will be added to the pods for all the KFD modules Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + + // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". 
+ NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 9a4f9ca9e..96d1445b9 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -75,6 +75,9 @@ type SpecDistributionCommon struct { // The tolerations that will be added to the pods for all the KFD modules Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + + // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". + NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 854a24a16..1724c7df8 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -84,6 +84,9 @@ type SpecDistributionCommon struct { // value: infra // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + + // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". 
+ NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 2aa905308..11dbd1c43 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -209,6 +209,10 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + }, + "networkPoliciesEnabled": { + "type": "boolean", + "description": "This field defines whether Network Policies are provided for all modules" } } }, diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 49e3379dc..e393abebc 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1026,6 +1026,10 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + }, + "networkPoliciesEnabled": { + "type": "boolean", + "description": "This field defines whether Network Policies are provided for all modules" } } }, diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 3e4451b36..1358f70d8 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -156,6 +156,10 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. 
(Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + }, + "networkPoliciesEnabled": { + "type": "boolean", + "description": "This field defines whether Network Policies are provided for all modules" } } }, diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index cc808f71e..d5c217bb4 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -727,6 +727,10 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." + }, + "networkPoliciesEnabled": { + "type": "boolean", + "description": "This field defines whether Network Policies are provided for all modules" } } }, From 52f20663a92a86997ff52b0cccc4aebc5e8f7863 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 31 Oct 2024 15:28:46 +0100 Subject: [PATCH 004/160] feat(logging): add network policies --- .../manifests/logging/kustomization.yaml.tpl | 3 + .../logging/policies/common.yaml.tpl | 37 +++++ .../logging/policies/configs.yaml.tpl | 23 ++++ .../logging/policies/fluentbit.yaml.tpl | 66 +++++++++ .../logging/policies/fluentd.yaml.tpl | 129 ++++++++++++++++++ .../logging/policies/kustomization.yaml.tpl | 26 ++++ .../policies/logging-operator.yaml.tpl | 22 +++ .../manifests/logging/policies/loki.yaml.tpl | 123 +++++++++++++++++ .../manifests/logging/policies/minio.yaml.tpl | 107 +++++++++++++++ .../policies/opensearch-dashboards.yaml.tpl | 30 ++++ .../logging/policies/opensearch.yaml.tpl | 116 ++++++++++++++++ 11 files changed, 682 insertions(+) create mode 100644 templates/distribution/manifests/logging/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/configs.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl create 
mode 100644 templates/distribution/manifests/logging/policies/fluentd.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/loki.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/minio.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl create mode 100644 templates/distribution/manifests/logging/policies/opensearch.yaml.tpl diff --git a/templates/distribution/manifests/logging/kustomization.yaml.tpl b/templates/distribution/manifests/logging/kustomization.yaml.tpl index c8f361bfd..9b1b42738 100644 --- a/templates/distribution/manifests/logging/kustomization.yaml.tpl +++ b/templates/distribution/manifests/logging/kustomization.yaml.tpl @@ -47,6 +47,9 @@ resources: - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/logging/katalog/loki-distributed" }} {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} # The kustomize version we are using does not support specifing more than 1 strategicMerge patch # in a single YAML file under the `patches` directive like the old versions did for `patchesStrategicMerge`. diff --git a/templates/distribution/manifests/logging/policies/common.yaml.tpl b/templates/distribution/manifests/logging/policies/common.yaml.tpl new file mode 100644 index 000000000..12cc3830b --- /dev/null +++ b/templates/distribution/manifests/logging/policies/common.yaml.tpl @@ -0,0 +1,37 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: logging +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-dns-access + namespace: logging +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 diff --git a/templates/distribution/manifests/logging/policies/configs.yaml.tpl b/templates/distribution/manifests/logging/policies/configs.yaml.tpl new file mode 100644 index 000000000..aa35827c7 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/configs.yaml.tpl @@ -0,0 +1,23 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: event-tailer-egress-apiserver + namespace: logging + labels: + app.kubernetes.io/name: event-tailer +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: event-tailer + egress: + - ports: + - port: 6443 + protocol: TCP +--- diff --git a/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl new file mode 100644 index 000000000..9213c688a --- /dev/null +++ b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl @@ -0,0 +1,66 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentbit-egress-fluentd + namespace: logging + labels: + app.kubernetes.io/name: fluentbit +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + egress: + - ports: + # fluentd + - port: 24240 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentbit-egress-apiserver + namespace: logging + labels: + app.kubernetes.io/name: fluentbit +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentbit-ingress-prometheus-metrics + namespace: logging + labels: + app.kubernetes.io/name: fluentbit +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 2020 + protocol: TCP diff --git a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl new file mode 100644 index 000000000..6faba450c --- /dev/null +++ b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl @@ -0,0 +1,129 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-ingress-fluentbit + namespace: logging + labels: + app.kubernetes.io/name: fluentd +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: fluentbit + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + ports: + - port: 24240 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-egress-minio + namespace: logging + labels: + app.kubernetes.io/name: fluentd +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + egress: + - to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-ingress-prometheus-metrics + namespace: logging + labels: + app.kubernetes.io/name: fluentd +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 24231 + protocol: TCP +--- +{{- if eq .spec.distribution.modules.logging.type "opensearch" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: fluentd-egress-opensearch + namespace: logging + labels: + app.kubernetes.io/name: fluentd +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9200 + protocol: TCP +--- +{{- end }} +{{- if eq .spec.distribution.modules.logging.type "loki" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy 
+metadata: + name: fluentd-egress-loki + namespace: logging + labels: + app.kubernetes.io/name: fluentd +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + app.kubernetes.io/component: gateway + ports: + - port: 8080 + protocol: TCP +--- +{{- end }} diff --git a/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl b/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..17f67c2ef --- /dev/null +++ b/templates/distribution/manifests/logging/policies/kustomization.yaml.tpl @@ -0,0 +1,26 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +{{ $loggingType := .spec.distribution.modules.logging.type }} + +resources: + - common.yaml + - configs.yaml + - fluentbit.yaml + - fluentd.yaml + - logging-operator.yaml + - minio.yaml + +{{- if eq $loggingType "loki" }} + - loki.yaml +{{- end }} + +{{- if eq $loggingType "opensearch" }} + - opensearch-dashboards.yaml + - opensearch.yaml +{{- end }} diff --git a/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl new file mode 100644 index 000000000..7aa37e605 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: logging-operator-egress-apiserver + namespace: logging + labels: + app.kubernetes.io/name: logging-operator +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: logging-operator + egress: + - ports: + - port: 6443 + protocol: TCP diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl new file mode 100644 index 000000000..612f970d8 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl @@ -0,0 +1,123 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-ingress-fluentd + namespace: logging + labels: + app.kubernetes.io/name: loki-distributed +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + app.kubernetes.io/component: gateway + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ports: + - port: 8080 + protocol: TCP + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-ingress-prometheus-metrics + namespace: logging + labels: + app.kubernetes.io/name: loki-distributed +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + ingress: + - ports: + - port: 3100 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-discovery + namespace: logging + labels: + 
app.kubernetes.io/name: loki-distributed +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + ingress: + - ports: + - port: 9095 + protocol: TCP + - port: 3100 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + egress: + - ports: + - port: 9095 + protocol: TCP + - port: 3100 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-egress-minio + namespace: logging + labels: + app.kubernetes.io/name: loki-distributed +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + egress: + - to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + ports: + - port: 9000 + protocol: TCP \ No newline at end of file diff --git a/templates/distribution/manifests/logging/policies/minio.yaml.tpl b/templates/distribution/manifests/logging/policies/minio.yaml.tpl new file mode 100644 index 000000000..d39e31fa1 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/minio.yaml.tpl @@ -0,0 +1,107 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-namespace + namespace: logging + labels: + app: minio +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + ports: + - port: 9000 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-apiserver + namespace: logging + labels: + app: minio-logging-buckets-setup +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-logging-buckets-setup + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-minio + namespace: logging + labels: + app: minio-logging-buckets-setup +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-logging-buckets-setup + egress: + - ports: + - port: 9000 + protocol: TCP + to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-prometheus-metrics + namespace: logging + labels: + app: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - ports: + - port: 9000 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus \ No newline at end of file diff --git a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl new file 
mode 100644 index 000000000..4fa8cb920 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-dashboards-egress-opensearch + namespace: logging + labels: + app: opensearch-dashboards +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: opensearch-dashboards + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9200 + protocol: TCP + \ No newline at end of file diff --git a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl new file mode 100644 index 000000000..2bc23a6c7 --- /dev/null +++ b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl @@ -0,0 +1,116 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-dashboards + namespace: logging + labels: + app.kubernetes.io/name: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: opensearch-dashboards + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-fluentd + namespace: logging + labels: + app.kubernetes.io/name: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: fluentd + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-discovery + namespace: logging + labels: + app.kubernetes.io/name: opensearch +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9300 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9300 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-prometheus-metrics + namespace: logging + labels: + app.kubernetes.io/name: opensearch +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - namespaceSelector: + matchLabels: + 
kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9108 + protocol: TCP From b1e2bfed80eb764c01ee9df164a7579e29a0d939 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Tue, 5 Nov 2024 16:11:16 +0100 Subject: [PATCH 005/160] feat(monitoring): add network policies --- .../monitoring/kustomization.yaml.tpl | 4 + .../monitoring/policies/alertmanager.yaml.tpl | 42 +++++++ .../policies/blackbox-exporter.yaml.tpl | 32 ++++++ .../monitoring/policies/common.yaml.tpl | 38 ++++++ .../monitoring/policies/grafana.yaml.tpl | 30 +++++ .../policies/kube-state-metrics.yaml.tpl | 32 ++++++ .../policies/kustomization.yaml.tpl | 31 +++++ .../monitoring/policies/mimir.yaml.tpl | 102 +++++++++++++++++ .../monitoring/policies/minio.yaml.tpl | 108 ++++++++++++++++++ .../policies/node-exporter.yaml.tpl | 30 +++++ .../policies/prometheus-adapter.yaml.tpl | 23 ++++ .../policies/prometheus-operator.yaml.tpl | 30 +++++ .../monitoring/policies/prometheus.yaml.tpl | 46 ++++++++ .../policies/x509-exporter.yaml.tpl | 45 ++++++++ 14 files changed, 593 insertions(+) create mode 100644 templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/minio.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl create mode 100644 
templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl create mode 100644 templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl diff --git a/templates/distribution/manifests/monitoring/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/kustomization.yaml.tpl index 9297778b3..955daf763 100644 --- a/templates/distribution/manifests/monitoring/kustomization.yaml.tpl +++ b/templates/distribution/manifests/monitoring/kustomization.yaml.tpl @@ -54,6 +54,10 @@ resources: - secrets/alertmanager.yml {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml {{- if eq .spec.distribution.common.provider.type "eks" }}{{/* in EKS there are no files to monitor on nodes */}} diff --git a/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl new file mode 100644 index 000000000..3070f3092 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl @@ -0,0 +1,42 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/alertmanager-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: alertmanager-main + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9093 + protocol: TCP + - port: 8080 + protocol: TCP + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: alertmanager + ports: + - port: 9094 + protocol: TCP + - port: 9094 + protocol: UDP + podSelector: + matchLabels: + app.kubernetes.io/component: alert-router + app.kubernetes.io/instance: main + app.kubernetes.io/name: alertmanager + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl new file mode 100644 index 000000000..4546ba928 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl @@ -0,0 +1,32 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+
+# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/blackboxExporter-networkPolicy.yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: blackbox-exporter
+  namespace: monitoring
+spec:
+  egress:
+  - {}
+  ingress:
+  - from:
+    - podSelector:
+        matchLabels:
+          app.kubernetes.io/name: prometheus
+    ports:
+    - port: 9115
+      protocol: TCP
+    - port: 19115
+      protocol: TCP
+  podSelector:
+    matchLabels:
+      app.kubernetes.io/component: exporter
+      app.kubernetes.io/name: blackbox-exporter
+      app.kubernetes.io/part-of: kube-prometheus
+  policyTypes:
+  - Egress
+  - Ingress
+    
\ No newline at end of file
diff --git a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl
new file mode 100644
index 000000000..0e071544b
--- /dev/null
+++ b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl
@@ -0,0 +1,38 @@
+# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: monitoring +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-dns-access + namespace: monitoring +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl new file mode 100644 index 000000000..fc7dcfc78 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/grafana-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: grafana + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 3000 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: grafana + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl new file mode 100644 index 000000000..b38a925b8 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl @@ -0,0 +1,32 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/kubeStateMetrics-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kube-state-metrics + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 8443 + protocol: TCP + - port: 9443 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..23e788bf9 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl @@ -0,0 +1,31 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +{{- $monitoringType := .spec.distribution.modules.monitoring.type }} +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yml + - prometheus-operator.yaml + - kube-state-metrics.yaml + - node-exporter.yaml + - x509-exporter.yaml + - blackbox-exporter.yaml + +{{- if or (eq $monitoringType "prometheus") (eq $monitoringType "mimir") }} + - alertmanager.yaml + - prometheus-adapter.yaml + - grafana.yaml +{{- end }} +{{- if eq $monitoringType "prometheus" }} + - prometheus.yaml +{{- end }} +{{- if eq $monitoringType "mimir" }} + - mimir.yaml +{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} + - minio.yaml +{{- end }} +{{- end }} diff --git a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl new file mode 100644 index 000000000..85ec2dd95 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl @@ -0,0 +1,102 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-ingress-prometheus-metrics + namespace: monitoring + labels: + app.kubernetes.io/name: mimir +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + ingress: + - ports: + - port: 8080 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-discovery + namespace: monitoring + labels: + app.kubernetes.io/name: mimir +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + ingress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + - port: 8080 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + egress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + - port: 8080 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: mimir +{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-egress-minio + namespace: monitoring + labels: + app.kubernetes.io/name: mimir +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + egress: + - to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 9000 + protocol: TCP +{{- end }} diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl new 
file mode 100644 index 000000000..0f58a8dad --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl @@ -0,0 +1,108 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-namespace + namespace: monitoring + labels: + app: minio +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + ports: + - port: 9000 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-apiserver + namespace: monitoring + labels: + app: minio-monitoring-buckets-setup +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-monitoring-buckets-setup + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-minio + namespace: monitoring + labels: + app: minio-monitoring-buckets-setup +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-monitoring-buckets-setup + egress: + - ports: + - port: 9000 + protocol: TCP + to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-prometheus-metrics + namespace: monitoring + labels: + app: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - ports: + - port: 9000 + protocol: TCP + from: + - 
namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl new file mode 100644 index 000000000..0e8649e62 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/nodeExporter-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: node-exporter + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9100 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl new file mode 100644 index 000000000..01f6f2320 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl @@ -0,0 +1,23 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheusAdapter-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-adapter + namespace: monitoring +spec: + egress: + - {} + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/component: metrics-adapter + app.kubernetes.io/name: prometheus-adapter + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl new file mode 100644 index 000000000..efea3a9cf --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheusOperator-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-operator + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 8443 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: prometheus-operator + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress + \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl new file mode 100644 index 000000000..7cc8e2444 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl @@ -0,0 +1,46 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheus-networkPolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-k8s + namespace: monitoring +spec: + egress: + - {} + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9090 + protocol: TCP + - port: 8080 + protocol: TCP + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus-adapter + ports: + - port: 9090 + protocol: TCP + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: grafana + ports: + - port: 9090 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/instance: k8s + app.kubernetes.io/name: prometheus + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Egress + - Ingress diff --git a/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl new file mode 100644 index 000000000..519ecc98b --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl @@ -0,0 +1,45 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: x509-exporter-egress-apiserver + namespace: monitoring + labels: + app: x509-certificate-exporter +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: x509-certificate-exporter + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: x509-exporter-ingress-prometheus-metrics + namespace: monitoring + labels: + app: x509-certificate-exporter +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: x509-certificate-exporter + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9793 + protocol: TCP + \ No newline at end of file From b3af3f41aee0be0039065603c32799303067e0de Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Tue, 5 Nov 2024 16:32:17 +0100 Subject: [PATCH 006/160] fix(monitoring): fix kustomization --- .../manifests/monitoring/policies/kustomization.yaml.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl index 23e788bf9..149718da4 100644 --- a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl @@ -8,7 +8,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - common.yml + - common.yaml - prometheus-operator.yaml - kube-state-metrics.yaml - node-exporter.yaml From 44b1ca6035e992a311592695d856c31762f6bc60 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Tue, 5 Nov 2024 16:36:54 +0100 Subject: [PATCH 007/160] feat(tracing): add network policies --- .../manifests/tracing/kustomization.yaml.tpl | 4 + .../tracing/policies/common.yaml.tpl | 38 +++++++ .../tracing/policies/kustomization.yaml.tpl | 14 +++ 
.../manifests/tracing/policies/minio.yaml.tpl | 101 ++++++++++++++++++ .../manifests/tracing/policies/tempo.yaml.tpl | 92 ++++++++++++++++ 5 files changed, 249 insertions(+) create mode 100644 templates/distribution/manifests/tracing/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/minio.yaml.tpl create mode 100644 templates/distribution/manifests/tracing/policies/tempo.yaml.tpl diff --git a/templates/distribution/manifests/tracing/kustomization.yaml.tpl b/templates/distribution/manifests/tracing/kustomization.yaml.tpl index bf54f6130..aab87047e 100644 --- a/templates/distribution/manifests/tracing/kustomization.yaml.tpl +++ b/templates/distribution/manifests/tracing/kustomization.yaml.tpl @@ -17,6 +17,10 @@ resources: {{- end }} {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml {{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} diff --git a/templates/distribution/manifests/tracing/policies/common.yaml.tpl b/templates/distribution/manifests/tracing/policies/common.yaml.tpl new file mode 100644 index 000000000..a61acfccc --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/common.yaml.tpl @@ -0,0 +1,38 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: tracing +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-dns-access + namespace: tracing +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + \ No newline at end of file diff --git a/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl b/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..96e0dff5c --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/kustomization.yaml.tpl @@ -0,0 +1,14 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - tempo.yaml +{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} + - minio.yaml +{{- end }} diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl new file mode 100644 index 000000000..809000ec8 --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -0,0 +1,101 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-namespace + namespace: tracing + labels: + app: minio +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + ports: + - port: 9000 + protocol: TCP + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-apiserver + namespace: tracing + labels: + app: minio-tracing-buckets-setup +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-tracing-buckets-setup + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-buckets-setup-egress-minio + namespace: tracing + labels: + app: minio-tracing-buckets-setup +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio-tracing-buckets-setup + egress: + - ports: + - port: 9000 + protocol: TCP + to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-prometheus-metrics + namespace: tracing + labels: + app: minio 
+spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - ports: + - port: 9000 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl new file mode 100644 index 000000000..339407c4a --- /dev/null +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -0,0 +1,92 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-discovery + namespace: tracing + labels: + app.kubernetes.io/name: tempo +spec: + policyTypes: + - Ingress + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + ingress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + egress: + - ports: + - port: 9095 + protocol: TCP + - port: 7946 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app.kubernetes.io/name: tempo +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-ingress-prometheus-metrics + namespace: tracing + labels: + app.kubernetes.io/name: tempo +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + ingress: + - ports: + - port: 3100 + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus +{{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-egress-minio + namespace: 
tracing + labels: + app.kubernetes.io/name: tempo +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + egress: + - to: + - podSelector: + matchLabels: + app: minio + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + ports: + - port: 9000 + protocol: TCP +{{- end }} From 405039d8ebdea2e7ec6e2baff85e2fcb99443879 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 7 Nov 2024 14:04:52 +0100 Subject: [PATCH 008/160] feat(monitoring,tracing): add network policy egress --- .../monitoring/policies/mimir.yaml.tpl | 17 +++++++++++++++++ .../manifests/tracing/policies/tempo.yaml.tpl | 17 +++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl index 85ec2dd95..09aec2061 100644 --- a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl @@ -99,4 +99,21 @@ spec: ports: - port: 9000 protocol: TCP +{{- else }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimir-distributed-egress-all + namespace: monitoring + labels: + app.kubernetes.io/name: mimir +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: mimir + egress: + - {} {{- end }} diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl index 339407c4a..15bf01aed 100644 --- a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -89,4 +89,21 @@ spec: ports: - port: 9000 protocol: TCP +{{- else }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributed-egress-all + namespace: tracing + labels: + app.kubernetes.io/name: tempo +spec: + policyTypes: 
+ - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + egress: + - {} {{- end }} From 346206cc754095ef5050baea07c1ec4cf97566b7 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 7 Nov 2024 17:31:47 +0100 Subject: [PATCH 009/160] feat(monitoring): add network policies for ingress --- .../monitoring/policies/common.yaml.tpl | 2 + .../monitoring/policies/ingress.yaml.tpl | 145 ++++++++++++++++++ .../policies/kustomization.yaml.tpl | 4 + 3 files changed, 151 insertions(+) create mode 100644 templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl diff --git a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl index 0e071544b..fe3565120 100644 --- a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl @@ -35,4 +35,6 @@ spec: ports: - protocol: UDP port: 53 + - protocol: TCP + port: 53 \ No newline at end of file diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl new file mode 100644 index 000000000..272b278c3 --- /dev/null +++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl @@ -0,0 +1,145 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: grafana-ingress-nginxingresscontroller
+  namespace: monitoring
+spec:
+  podSelector:
+    matchLabels:
+      app.kubernetes.io/component: grafana
+      app.kubernetes.io/name: grafana
+      app.kubernetes.io/part-of: kube-prometheus
+  policyTypes:
+    - Ingress
+  ingress:
+# single nginx, no sso
+{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: pomerium
+{{ end }}
+    ports:
+      - port: 3000
+        protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: prometheus-ingress-nginxingresscontroller
+  namespace: monitoring
+spec:
+  podSelector:
+    matchLabels:
+      app.kubernetes.io/component: prometheus
+      app.kubernetes.io/instance: k8s
+      app.kubernetes.io/name: prometheus
+      app.kubernetes.io/part-of: kube-prometheus
+  policyTypes:
+    - Ingress
+  ingress:
+# single nginx, no sso
+{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne 
.spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: pomerium
+{{ end }}
+    ports:
+      - port: 9090
+        protocol: TCP
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: alertmanager-ingress-nginxingresscontroller
+  namespace: monitoring
+spec:
+  podSelector:
+    matchLabels:
+      app.kubernetes.io/component: alert-router
+      app.kubernetes.io/instance: main
+      app.kubernetes.io/name: alertmanager
+      app.kubernetes.io/part-of: kube-prometheus
+  policyTypes:
+    - Ingress
+  ingress:
+# single nginx, no sso
+{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: ingress-nginx
+# dual nginx, no sso
+{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: ingress
+# sso
+{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }}
+    - from:
+        - namespaceSelector:
+            matchLabels:
+              kubernetes.io/metadata.name: ingress-nginx
+          podSelector:
+            matchLabels:
+              app: pomerium
+{{ end }}
+    ports:
+      - port: 9093
+        protocol: TCP
+---
diff --git a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl
index 149718da4..4c7d44a53 100644
--- a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl
+++ 
b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl @@ -29,3 +29,7 @@ resources: - minio.yaml {{- end }} {{- end }} + +{{- if and (ne .spec.distribution.modules.ingress.nginx.type "none") }}{{/* we don't need ingresses for Prometheus in Agent mode */}} + - ingress.yaml +{{- end }} From 860bffb1a0bdf365c509a741a81a9621f8b37fdc Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 7 Nov 2024 17:33:34 +0100 Subject: [PATCH 010/160] fix(monitoring): fix network policy with sso --- .../manifests/monitoring/policies/ingress.yaml.tpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl index 272b278c3..9124b3bab 100644 --- a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl @@ -40,7 +40,7 @@ spec: - from: - namespaceSelector: matchLabels: - kubernetes.io/metadata.name: ingress-nginx + kubernetes.io/metadata.name: pomerium podSelector: matchLabels: app: pomerium @@ -87,7 +87,7 @@ spec: - from: - namespaceSelector: matchLabels: - kubernetes.io/metadata.name: ingress-nginx + kubernetes.io/metadata.name: pomerium podSelector: matchLabels: app: pomerium @@ -134,7 +134,7 @@ spec: - from: - namespaceSelector: matchLabels: - kubernetes.io/metadata.name: ingress-nginx + kubernetes.io/metadata.name: pomerium podSelector: matchLabels: app: pomerium From 3121451373f187d5d2b3aed81092c13c50907221 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Fri, 8 Nov 2024 09:42:32 +0100 Subject: [PATCH 011/160] feat: changes to support ingress v3.0.0 Changes to support Ingress module v3 that has been aligned with upstream chart. Note: this is not complete. Some other changes may be necessary. 
--- .../distribution/manifests/ingress/kustomization.yaml.tpl | 6 +++--- .../manifests/ingress/patches/infra-nodes.yml.tpl | 6 +++--- templates/distribution/scripts/apply.sh.tpl | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/templates/distribution/manifests/ingress/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/kustomization.yaml.tpl index ad0a7dae4..374c70bba 100644 --- a/templates/distribution/manifests/ingress/kustomization.yaml.tpl +++ b/templates/distribution/manifests/ingress/kustomization.yaml.tpl @@ -92,14 +92,14 @@ patchesJson6902: group: apps version: v1 kind: DaemonSet - name: nginx-ingress-controller-external + name: ingress-nginx-controller-external namespace: ingress-nginx path: patchesJson/ingress-nginx.yml - target: group: apps version: v1 kind: DaemonSet - name: nginx-ingress-controller-internal + name: ingress-nginx-controller-internal namespace: ingress-nginx path: patchesJson/ingress-nginx.yml {{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} @@ -107,7 +107,7 @@ patchesJson6902: group: apps version: v1 kind: DaemonSet - name: nginx-ingress-controller + name: ingress-nginx-controller namespace: ingress-nginx path: patchesJson/ingress-nginx.yml {{- end }} diff --git a/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl index f94fe9a6c..2ba355781 100644 --- a/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/infra-nodes.yml.tpl @@ -66,7 +66,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx-ingress-controller-external + name: ingress-nginx-controller-external namespace: ingress-nginx spec: template: @@ -79,7 +79,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx-ingress-controller-internal + name: ingress-nginx-controller-internal namespace: ingress-nginx spec: template: @@ -93,7 
+93,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx-ingress-controller + name: ingress-nginx-controller namespace: ingress-nginx spec: template: diff --git a/templates/distribution/scripts/apply.sh.tpl b/templates/distribution/scripts/apply.sh.tpl index e04b62446..33f57dd52 100644 --- a/templates/distribution/scripts/apply.sh.tpl +++ b/templates/distribution/scripts/apply.sh.tpl @@ -62,14 +62,14 @@ $kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-tracing-bu | $kubectlbin apply -f - --server-side {{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} -$kubectlbin rollout status daemonset nginx-ingress-controller-external -n ingress-nginx --timeout=180s +$kubectlbin rollout status daemonset ingress-nginx-controller-external -n ingress-nginx --timeout=180s -$kubectlbin rollout status daemonset nginx-ingress-controller-internal -n ingress-nginx --timeout=180s +$kubectlbin rollout status daemonset ingress-nginx-controller-internal -n ingress-nginx --timeout=180s {{- end }} {{- if eq .spec.distribution.modules.ingress.nginx.type "single" }} -$kubectlbin rollout status daemonset nginx-ingress-controller -n ingress-nginx --timeout=180s +$kubectlbin rollout status daemonset ingress-nginx-controller -n ingress-nginx --timeout=180s {{- end }} From 16a9206ab7919404623ac15da4d6bd413e18395e Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Mon, 11 Nov 2024 11:34:04 +0100 Subject: [PATCH 012/160] feat(opa): add network policies --- .../manifests/opa/kustomization.yaml.tpl | 4 + .../opa/policies/gatekeeper/audit.yaml.tpl | 19 ++++ .../opa/policies/gatekeeper/common.yaml.tpl | 37 +++++++ .../gatekeeper/controller-manager.yaml.tpl | 37 +++++++ .../gatekeeper-policy-manager.yaml.tpl | 42 ++++++++ .../gatekeeper/kustomization.yaml.tpl | 15 +++ .../gatekeeper/prometheus-metrics.yaml.tpl | 26 +++++ .../opa/policies/kustomization.yaml.tpl | 16 +++ .../opa/policies/kyverno/common.yaml.tpl | 36 +++++++ 
.../policies/kyverno/kustomization.yaml.tpl | 11 +++ .../opa/policies/kyverno/kyverno.yaml.tpl | 99 +++++++++++++++++++ 11 files changed, 342 insertions(+) create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl diff --git a/templates/distribution/manifests/opa/kustomization.yaml.tpl b/templates/distribution/manifests/opa/kustomization.yaml.tpl index 6b6672a33..b31532d5b 100644 --- a/templates/distribution/manifests/opa/kustomization.yaml.tpl +++ b/templates/distribution/manifests/opa/kustomization.yaml.tpl @@ -27,6 +27,10 @@ resources: {{- end }} {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml {{- if .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces }} diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl new file mode 100644 index 000000000..53cdeb2c5 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl @@ 
-0,0 +1,19 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: auditcontroller-egress-apiserver + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: + control-plane: audit-controller + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl new file mode 100644 index 000000000..99acf05f9 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl @@ -0,0 +1,37 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: gatekeeper-system +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 \ No newline at end of file diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl new file mode 100644 index 000000000..81371c828 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl @@ -0,0 +1,37 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: controllermanager-egress-kubeapiserver + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: controllermanager-ingress-kubeapiserver + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 8443 + - protocol: TCP + port: 443 diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl new file mode 100644 index 000000000..2e894493b --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl @@ -0,0 +1,42 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: gatekeeperpolicymanager-egress-kubeapiserver + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: + app: gatekeeper-policy-manager + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: gatekeeperpolicymanager-ingress-gatekeeper + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: + app: gatekeeper-policy-manager + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium + ports: + - protocol: TCP + port: 8080 diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl new file mode 100644 index 000000000..79f5cfce0 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/kustomization.yaml.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - audit.yaml + - controller-manager.yaml + - gatekeeper-policy-manager.yaml + - prometheus-metrics.yaml + diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..389e63871 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl @@ -0,0 +1,26 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: gatekeeper-ingress-prometheusmetrics + namespace: gatekeeper-system +spec: + podSelector: + matchLabels: + gatekeeper.sh/system: "yes" + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - protocol: TCP + port: 8888 diff --git a/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..aed10dc32 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kustomization.yaml.tpl @@ -0,0 +1,16 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +{{- if eq .spec.distribution.modules.policy.type "gatekeeper" }} + - gatekeeper +{{- end }} +{{- if eq .spec.distribution.modules.policy.type "kyverno" }} + - kyverno +{{- end }} + diff --git a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl new file mode 100644 index 000000000..a620ebf7e --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl @@ -0,0 +1,36 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: kyverno +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: kyverno +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 diff --git a/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl new file mode 100644 index 000000000..77a88b0bb --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kyverno/kustomization.yaml.tpl @@ -0,0 +1,11 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - kyverno.yaml diff --git a/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl new file mode 100644 index 000000000..4246589b1 --- /dev/null +++ b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl @@ -0,0 +1,99 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyvernoadmission-egress-kubeapiserver + namespace: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: admission-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyvernoadmission-ingress-nodes + namespace: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: admission-controller + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyvernobackground-egress-kubeapiserver + namespace: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: background-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyvernoreports-egress-kubeapiserver + namespace: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: reports-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyvernocleanup-egress-kubeapiserver + namespace: kyverno +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: cleanup-controller + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: kyvernocleanupreports-egress-kubeapiserver + namespace: kyverno +spec: + podSelector: + matchExpressions: + - { key: "batch.kubernetes.io/job-name", operator: "Exists" } + policyTypes: + - Egress + egress: + - ports: + - protocol: TCP + port: 6443 From f89ddde2b8d542ad0e6b4d3aa63c55cdb3b5bbab Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Mon, 11 Nov 2024 11:34:41 +0100 Subject: [PATCH 013/160] 
feat(ingress): add network policies --- .../manifests/ingress/kustomization.yaml.tpl | 4 + .../cert-manager/cert-manager.yaml.tpl | 74 +++++++++++++++++++ .../policies/cert-manager/common.yaml.tpl | 39 ++++++++++ .../cert-manager/kustomization.yaml.tpl | 12 +++ .../cert-manager/prometheus-metrics.yaml.tpl | 27 +++++++ .../policies/ingress-nginx/common.yaml.tpl | 38 ++++++++++ .../ingress-nginx/forecastle.yaml.tpl | 46 ++++++++++++ .../ingress-nginx/kustomization.yaml.tpl | 13 ++++ .../nginx-ingress-controller.yaml.tpl | 45 +++++++++++ .../ingress-nginx/prometheus-metrics.yaml.tpl | 30 ++++++++ .../ingress/policies/kustomization.yaml.tpl | 15 ++++ 11 files changed, 343 insertions(+) create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl create mode 100644 templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl diff --git a/templates/distribution/manifests/ingress/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/kustomization.yaml.tpl index ad0a7dae4..2309f735b 100644 --- a/templates/distribution/manifests/ingress/kustomization.yaml.tpl +++ 
b/templates/distribution/manifests/ingress/kustomization.yaml.tpl @@ -24,6 +24,10 @@ resources: {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + {{- if ne .spec.distribution.modules.ingress.nginx.type "none" }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/ingress/katalog/forecastle" }} {{- end }} diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl new file mode 100644 index 000000000..9ab8a9575 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl @@ -0,0 +1,74 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# https://cert-manager.io/docs/installation/best-practice/#network-requirements +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: certmanager-egress-kubeapiserver + namespace: cert-manager +spec: + podSelector: + matchLabels: + app.kubernetes.io/instance: cert-manager + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: certmanagerwebhook-ingress-kubeapiserver + namespace: cert-manager +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + policyTypes: + - Ingress + ingress: + - ports: + - port: 10250 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: certmanager-egress-https + namespace: cert-manager +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + policyTypes: + - Egress + egress: + - ports: + - port: 443 + protocol: TCP + - port: 80 + protocol: TCP +--- 
+apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: acmehttpsolver-ingress-letsencrypt + namespace: pomerium +spec: + podSelector: + matchLabels: + app: cert-manager + policyTypes: + - Ingress + ingress: + - ports: + - port: 8089 + protocol: TCP +--- diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl new file mode 100644 index 000000000..73a006f07 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl @@ -0,0 +1,39 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: cert-manager +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: cert-manager +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + # https://cert-manager.io/docs/installation/best-practice/#network-requirements \ No newline at end of file diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl new file mode 100644 index 000000000..b71d8d27f --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/kustomization.yaml.tpl @@ -0,0 +1,12 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - cert-manager.yaml + - prometheus-metrics.yaml diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..3141cf9e8 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl @@ -0,0 +1,27 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: certmanager-ingress-prometheusmetrics + namespace: cert-manager +spec: + podSelector: + matchLabels: + app.kubernetes.io/component: controller + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9402 + protocol: TCP diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl new file mode 100644 index 000000000..39daf71e7 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl @@ -0,0 +1,38 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: ingress-nginx +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: ingress-nginx +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 +--- diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl new file mode 100644 index 000000000..c1719f289 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl @@ -0,0 +1,46 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: forecastle-ingress-nginxingresscontroller + namespace: ingress-nginx +spec: + podSelector: + matchLabels: + app: forecastle + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 3000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: forecastle-egress-kubeapiserver + namespace: ingress-nginx +spec: + podSelector: + matchLabels: + app: forecastle + policyTypes: + - Egress + egress: + - ports: + - port: 6443 + protocol: TCP \ No newline at end of file diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl new file mode 100644 index 000000000..fba4b8119 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl @@ -0,0 +1,13 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml + - forecastle.yaml + - nginx-ingress-controller.yaml + - prometheus-metrics.yaml diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl new file mode 100644 index 000000000..a20ff2696 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl @@ -0,0 +1,45 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: nginxingresscontroller-egress-all + namespace: ingress-nginx +spec: + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + policyTypes: + - Egress + egress: + - {} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-ingress-nginxingresscontroller + namespace: ingress-nginx +spec: + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ingress: + - ports: + - port: 8080 + protocol: TCP + - port: 8443 + protocol: TCP + - port: 9443 + protocol: TCP + policyTypes: + - Ingress diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..7f91521f6 --- /dev/null +++ 
b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl @@ -0,0 +1,30 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: nginx-ingress-prometheusmetrics + namespace: ingress-nginx +spec: + podSelector: + matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - protocol: TCP + port: 10254 diff --git a/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..f4820fc72 --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +{{ if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" -}} + - cert-manager +{{ end }} +{{- if ne .spec.distribution.modules.ingress.nginx.type "none" }} + - ingress-nginx +{{ end }} \ No newline at end of file From f3849ebffaa014796427bb6017e1a25157c44572 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Mon, 11 Nov 2024 11:36:02 +0100 Subject: [PATCH 014/160] feat(auth): add network policies --- .../manifests/auth/kustomization.yaml.tpl | 4 + .../auth/policies/acme-http-solver.yaml.tpl | 28 +++ .../manifests/auth/policies/common.yaml.tpl | 38 ++++ .../auth/policies/kustomization.yaml.tpl | 15 ++ .../manifests/auth/policies/pomerium.yaml.tpl | 205 ++++++++++++++++++ .../auth/policies/prometheus-metrics.yaml.tpl | 28 +++ 6 files changed, 318 insertions(+) create mode 100644 templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/common.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/kustomization.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/pomerium.yaml.tpl create mode 100644 templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl diff --git a/templates/distribution/manifests/auth/kustomization.yaml.tpl b/templates/distribution/manifests/auth/kustomization.yaml.tpl index 5ab2396a8..a1e43808c 100644 --- a/templates/distribution/manifests/auth/kustomization.yaml.tpl +++ b/templates/distribution/manifests/auth/kustomization.yaml.tpl @@ -17,6 +17,10 @@ resources: - resources/ingress-infra.yml {{- end }} +{{ if eq .spec.distribution.common.networkPoliciesEnabled true }} + - policies +{{- end }} + patchesStrategicMerge: - patches/infra-nodes.yml - patches/pomerium-ingress.yml diff --git a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl 
b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl new file mode 100644 index 000000000..fc12594de --- /dev/null +++ b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl @@ -0,0 +1,28 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: acmehttpsolver-ingress-nginxingresscontroller + namespace: pomerium +spec: + podSelector: + matchLabels: + app: cert-manager + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx + ports: + - port: 8089 + protocol: TCP +--- diff --git a/templates/distribution/manifests/auth/policies/common.yaml.tpl b/templates/distribution/manifests/auth/policies/common.yaml.tpl new file mode 100644 index 000000000..f8f0ada65 --- /dev/null +++ b/templates/distribution/manifests/auth/policies/common.yaml.tpl @@ -0,0 +1,38 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: deny-all + namespace: pomerium +spec: + podSelector: {} + policyTypes: + - Egress + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-dns + namespace: pomerium +spec: + podSelector: + matchLabels: {} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 +--- diff --git a/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl new file mode 100644 index 000000000..b5e04bf45 --- /dev/null +++ b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl @@ -0,0 +1,15 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - common.yaml +{{- if eq .spec.distribution.modules.auth.provider.type "sso" }} + - acme-http-solver.yaml + - pomerium.yaml + - prometheus-metrics.yaml +{{- end }} diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl new file mode 100644 index 000000000..7e5a5beff --- /dev/null +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -0,0 +1,205 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-ingress-nginxingresscontroller + namespace: pomerium +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: pomerium + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-https + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - ports: + - port: 443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-grafana + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/component: grafana + ports: + - port: 3000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-prometheus + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - port: 9090 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-alertmanager + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + alertmanager: main + ports: + - port: 9093 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-forecastle + 
namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: forecastle + ports: + - port: 3000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-gatekeeperpolicymanager + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: gatekeeper-system + podSelector: + matchLabels: + app: gatekeeper-policy-manager + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-hubbleui + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + app.kubernetes.io/name: hubble-ui + ports: + - port: 8081 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-opensearchdashboard + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9200 + protocol: TCP +--- diff --git a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl new file mode 100644 index 000000000..0e107a7a7 --- /dev/null +++ b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl @@ -0,0 +1,28 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-ingress-prometheusmetrics + namespace: pomerium +spec: + podSelector: + matchLabels: + app: pomerium + policyTypes: + - Ingress + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ports: + - protocol: TCP + port: 9090 +--- From f3d5b574aee6ece35c98942dbf9b80629e2667e9 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Mon, 11 Nov 2024 12:56:36 +0100 Subject: [PATCH 015/160] fix(network-policies): fix syntax --- .../policies/ingress-nginx/forecastle.yaml.tpl | 9 ++++++++- .../ingress/policies/kustomization.yaml.tpl | 2 +- .../manifests/logging/policies/loki.yaml.tpl | 6 ++++++ .../policies/blackbox-exporter.yaml.tpl | 1 + .../monitoring/policies/ingress.yaml.tpl | 18 +++++++++--------- 5 files changed, 25 insertions(+), 11 deletions(-) diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl index c1719f289..73c697f20 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl @@ -16,11 +16,18 @@ spec: ingress: - from: - namespaceSelector: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + matchLabels: + kubernetes.io/metadata.name: pomerium +{{ else }} matchLabels: kubernetes.io/metadata.name: ingress-nginx +{{- end }} podSelector: matchLabels: -{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + app: pomerium +{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }} app: ingress {{- else if eq 
.spec.distribution.modules.ingress.nginx.type "single" }} app: ingress-nginx diff --git a/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl index f4820fc72..22b97ea52 100644 --- a/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/kustomization.yaml.tpl @@ -7,7 +7,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: -{{ if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" -}} +{{- if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" }} - cert-manager {{ end }} {{- if ne .spec.distribution.modules.ingress.nginx.type "none" }} diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl index 612f970d8..57207cd10 100644 --- a/templates/distribution/manifests/logging/policies/loki.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl @@ -28,6 +28,8 @@ spec: ports: - port: 8080 protocol: TCP + - port: 3100 + protocol: TCP --- apiVersion: networking.k8s.io/v1 @@ -76,6 +78,8 @@ spec: protocol: TCP - port: 3100 protocol: TCP + - port: 7946 + protocol: TCP from: - namespaceSelector: matchLabels: @@ -89,6 +93,8 @@ spec: protocol: TCP - port: 3100 protocol: TCP + - port: 7946 + protocol: TCP to: - namespaceSelector: matchLabels: diff --git a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl index 4546ba928..165f7d94b 100644 --- a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl @@ -3,6 +3,7 @@ # license that can be found in the LICENSE file. 
#ย source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/blackboxExporter-networkPolicy.yaml +--- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl index 9124b3bab..58f17e36a 100644 --- a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl @@ -27,7 +27,7 @@ spec: matchLabels: app: ingress-nginx # dual nginx, no sso -{{ else if (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} - from: - namespaceSelector: matchLabels: @@ -57,9 +57,9 @@ metadata: spec: podSelector: matchLabels: - app.kubernetes.io/component: alert-router - app.kubernetes.io/instance: main - app.kubernetes.io/name: alertmanager + app.kubernetes.io/component: prometheus + app.kubernetes.io/instance: k8s + app.kubernetes.io/name: prometheus app.kubernetes.io/part-of: kube-prometheus policyTypes: - Ingress @@ -74,7 +74,7 @@ spec: matchLabels: app: ingress-nginx # dual nginx, no sso -{{ else if (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} - from: - namespaceSelector: matchLabels: @@ -104,9 +104,9 @@ metadata: spec: podSelector: matchLabels: - app.kubernetes.io/component: prometheus - app.kubernetes.io/instance: k8s - app.kubernetes.io/name: prometheus + app.kubernetes.io/component: alert-router + app.kubernetes.io/instance: main + app.kubernetes.io/name: alertmanager app.kubernetes.io/part-of: kube-prometheus 
policyTypes: - Ingress @@ -121,7 +121,7 @@ spec: matchLabels: app: ingress-nginx # dual nginx, no sso -{{ else if (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} - from: - namespaceSelector: matchLabels: From dd0b5913188a13e8f3cf3f475b2f0638d7f50c89 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Mon, 11 Nov 2024 15:02:16 +0100 Subject: [PATCH 016/160] feat(pomerium): add minio network policies --- .../manifests/auth/policies/pomerium.yaml.tpl | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index 7e5a5beff..527301384 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -203,3 +203,26 @@ spec: - port: 9200 protocol: TCP --- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-minio + namespace: pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: minio + ports: + - port: 9001 + protocol: TCP +--- From 748839f0b517868500b62358f17f2b81aa7f054c Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Mon, 11 Nov 2024 15:27:55 +0100 Subject: [PATCH 017/160] feat(network-policies): add minio policies --- .../manifests/logging/policies/minio.yaml.tpl | 67 ++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/templates/distribution/manifests/logging/policies/minio.yaml.tpl b/templates/distribution/manifests/logging/policies/minio.yaml.tpl index d39e31fa1..d70b5c3a8 100644 --- 
a/templates/distribution/manifests/logging/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/minio.yaml.tpl @@ -104,4 +104,69 @@ spec: kubernetes.io/metadata.name: monitoring podSelector: matchLabels: - app.kubernetes.io/name: prometheus \ No newline at end of file + app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-egress-https + namespace: logging + labels: + app: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio + egress: + - ports: + - port: 443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-nginxingresscontroller + namespace: logging + labels: + app: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: +# single nginx, no sso +{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9001 + protocol: TCP +--- From e503131cefda7b69dc62f1064adadd072df2a683 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Mon, 11 Nov 2024 15:28:11 +0100 Subject: [PATCH 018/160] feat(network-policies): add loki policy --- .../manifests/logging/policies/loki.yaml.tpl | 26 +++++++++++++++++++ 1 file changed, 
26 insertions(+) diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl index 57207cd10..3740d2193 100644 --- a/templates/distribution/manifests/logging/policies/loki.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl @@ -30,6 +30,32 @@ spec: protocol: TCP - port: 3100 protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: loki-distributed-ingress-grafana + namespace: logging + labels: + app.kubernetes.io/name: loki-distributed +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: loki-distributed + app.kubernetes.io/component: gateway + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: grafana + ports: + - port: 8080 + protocol: TCP --- apiVersion: networking.k8s.io/v1 From 8cf0846b2bdf5b1cc7b26d9ef09f2cf68d6612eb Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Mon, 11 Nov 2024 15:49:51 +0100 Subject: [PATCH 019/160] feat(pomerium): add minio tracing network policy --- .../manifests/auth/policies/pomerium.yaml.tpl | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index 527301384..9845808ec 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -206,7 +206,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-minio + name: pomerium-egress-miniologging namespace: pomerium spec: policyTypes: @@ -226,3 +226,26 @@ spec: - port: 9001 protocol: TCP --- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: pomerium-egress-miniotracing + namespace: 
pomerium +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: pomerium + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app: minio + ports: + - port: 9001 + protocol: TCP +--- From 775262e69cd29ec4a86d118a54358835611f4644 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Mon, 11 Nov 2024 16:36:07 +0100 Subject: [PATCH 020/160] feat(tracing): add minio network policies --- .../manifests/tracing/policies/minio.yaml.tpl | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl index 809000ec8..6a74fc6ed 100644 --- a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -99,3 +99,47 @@ spec: podSelector: matchLabels: app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-pomerium + namespace: tracing + labels: + app.kubernetes.io/name: minio-tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium + ports: + - port: 9001 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-egress-https + namespace: tracing + labels: + app: minio +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio + egress: + - ports: + - port: 443 + protocol: TCP +--- \ No newline at end of file From dd9174e2586fc16a1869313bf63fc88a187abef8 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Mon, 11 Nov 2024 17:58:00 +0100 Subject: [PATCH 021/160] fix(network-policies): ingress,tracing --- .../cert-manager/cert-manager.yaml.tpl | 2 ++ 
.../policies/cert-manager/common.yaml.tpl | 9 +---- .../manifests/tracing/policies/minio.yaml.tpl | 33 +++++++++++++++---- .../manifests/tracing/policies/tempo.yaml.tpl | 31 +++++++++++++++++ 4 files changed, 61 insertions(+), 14 deletions(-) diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl index 9ab8a9575..ebe36547d 100644 --- a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl @@ -56,6 +56,7 @@ spec: - port: 80 protocol: TCP --- +{{- if eq .spec.distribution.modules.auth.provider.type "sso" }} apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -72,3 +73,4 @@ spec: - port: 8089 protocol: TCP --- +{{- end }} diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl index 73a006f07..d85dea771 100644 --- a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl @@ -24,14 +24,7 @@ spec: policyTypes: - Egress egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: kube-system - podSelector: - matchLabels: - k8s-app: kube-dns - ports: + - ports: - protocol: UDP port: 53 - protocol: TCP diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl index 6a74fc6ed..eed3d3315 100644 --- a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -114,13 +114,34 @@ spec: matchLabels: app: minio ingress: +# single nginx, no sso +{{if and (eq .spec.distribution.modules.ingress.nginx.type 
"single") (ne .spec.distribution.modules.auth.provider.type "sso") }} - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: pomerium - podSelector: - matchLabels: - app: pomerium + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} ports: - port: 9001 protocol: TCP diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl index 15bf01aed..9590559e8 100644 --- a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -18,6 +18,8 @@ spec: protocol: TCP - port: 7946 protocol: TCP + - port: 3100 + protocol: TCP from: - namespaceSelector: matchLabels: @@ -31,6 +33,8 @@ spec: protocol: TCP - port: 7946 protocol: TCP + - port: 3100 + protocol: TCP to: - namespaceSelector: matchLabels: @@ -41,6 +45,33 @@ spec: --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy +metadata: + name: tempo-distributed-ingress-grafana + namespace: tracing + labels: + app.kubernetes.io/name: tempo +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: gateway + ingress: + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app.kubernetes.io/name: grafana + ports: + - port: 
8080 + protocol: TCP + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy metadata: name: tempo-distributed-ingress-prometheus-metrics namespace: tracing From 43c1433bf31c25128235950d3a544641cd0d9e2f Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 12 Nov 2024 14:34:33 +0100 Subject: [PATCH 022/160] feat(network-policies): add auth network policies --- .../manifests/auth/policies/kustomization.yaml.tpl | 2 +- .../distribution/manifests/auth/policies/pomerium.yaml.tpl | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl index b5e04bf45..49e948a8d 100644 --- a/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/kustomization.yaml.tpl @@ -6,9 +6,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization +{{- if eq .spec.distribution.modules.auth.provider.type "sso" }} resources: - common.yaml -{{- if eq .spec.distribution.modules.auth.provider.type "sso" }} - acme-http-solver.yaml - pomerium.yaml - prometheus-metrics.yaml diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index 9845808ec..16c6647ce 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -198,10 +198,12 @@ spec: kubernetes.io/metadata.name: logging podSelector: matchLabels: - app.kubernetes.io/name: opensearch + app: opensearch-dashboards ports: - port: 9200 protocol: TCP + - port: 5601 + protocol: TCP --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy From beabc079114f986764ab11a3e031c478604ba048 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 12 Nov 2024 14:34:52 +0100 Subject: [PATCH 023/160] feat(network-policies): add logging network 
policies --- .../policies/opensearch-dashboards.yaml.tpl | 59 ++++++++++++++++++- .../logging/policies/opensearch.yaml.tpl | 51 ++++++++++++++++ 2 files changed, 109 insertions(+), 1 deletion(-) diff --git a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl index 4fa8cb920..c919cb4a8 100644 --- a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl @@ -27,4 +27,61 @@ spec: ports: - port: 9200 protocol: TCP - \ No newline at end of file +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearchdashboards-ingress-jobs + namespace: logging +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: opensearch-dashboards + release: opensearch-dashboards + ingress: + - from: + - podSelector: + matchExpressions: + - key: batch.kubernetes.io/job-name + operator: Exists + ports: + - port: 5601 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearchdashboards-ingress-nginx + namespace: logging +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: opensearch-dashboards + ingress: + - from: + - namespaceSelector: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + matchLabels: + kubernetes.io/metadata.name: pomerium +{{ else }} + matchLabels: + kubernetes.io/metadata.name: ingress-nginx +{{- end }} + podSelector: + matchLabels: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + app: pomerium +{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 5601 + protocol: TCP +--- + diff --git 
a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl index 2bc23a6c7..8641db6f5 100644 --- a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl @@ -114,3 +114,54 @@ spec: ports: - port: 9108 protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: opensearch-ingress-jobs + namespace: logging +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ingress: + - from: + - podSelector: + matchExpressions: + - key: batch.kubernetes.io/job-name + operator: Exists + ports: + - port: 9200 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: jobs-egress-opensearch + namespace: logging +spec: + policyTypes: + - Egress + podSelector: + matchExpressions: + - key: batch.kubernetes.io/job-name + operator: Exists + egress: + - to: + - podSelector: + matchLabels: + app: opensearch-dashboards + release: opensearch-dashboards + ports: + - port: 5601 + protocol: TCP + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: opensearch + ports: + - port: 9200 + protocol: TCP +--- \ No newline at end of file From 77210227482c8966c32ef8cf3ace84d31538fad0 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 12 Nov 2024 14:35:40 +0100 Subject: [PATCH 024/160] feat(network-policies): add monitoring network policies --- .../monitoring/policies/grafana.yaml.tpl | 39 +++++- .../monitoring/policies/mimir.yaml.tpl | 59 +++++++++ .../monitoring/policies/minio.yaml.tpl | 18 ++- .../policies/prometheus-adapter.yaml.tpl | 25 ++++ .../monitoring/policies/prometheus.yaml.tpl | 116 ++++++++++++++++++ 5 files changed, 255 insertions(+), 2 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl 
b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl index fc7dcfc78..c17ff307b 100644 --- a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl @@ -2,6 +2,8 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. +{{- $monitoringType := .spec.distribution.modules.monitoring.type }} + # source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/grafana-networkPolicy.yaml apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -27,4 +29,39 @@ spec: policyTypes: - Egress - Ingress - \ No newline at end of file +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: grafana-ingress-nginx + namespace: monitoring +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: grafana + app.kubernetes.io/component: grafana + ingress: + - from: + - namespaceSelector: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + matchLabels: + kubernetes.io/metadata.name: pomerium +{{ else }} + matchLabels: + kubernetes.io/metadata.name: ingress-nginx +{{- end }} + podSelector: + matchLabels: +{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} + app: pomerium +{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} + app: ingress-nginx +{{- end }} + ports: + - port: 3000 + protocol: TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl index 09aec2061..28657ab04 100644 --- a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl @@ -73,6 +73,65 @@ spec: podSelector: matchLabels: app.kubernetes.io/name: mimir +--- +apiVersion: 
networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimirgateway-ingress-grafana + namespace: monitoring +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/component: gateway + app.kubernetes.io/instance: mimir-distributed + app.kubernetes.io/name: mimir + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: grafana + app.kubernetes.io/component: grafana + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimirquerier-egress-all + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: mimir-distributed + app.kubernetes.io/name: mimir + app.kubernetes.io/component: querier + egress: + - ports: + - port: 443 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: mimiringester-egress-all + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: mimir-distributed + app.kubernetes.io/name: mimir + app.kubernetes.io/component: ingester + egress: + - ports: + - port: 443 + protocol: TCP {{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} --- apiVersion: networking.k8s.io/v1 diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl index 0f58a8dad..020f31698 100644 --- a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl @@ -105,4 +105,20 @@ spec: podSelector: matchLabels: app.kubernetes.io/name: prometheus - \ No newline at end of file +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: miniomonitoring-egress-all + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app: minio + egress: + - ports: + - port: 443 + protocol: 
TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl index 01f6f2320..1add6a429 100644 --- a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl @@ -21,3 +21,28 @@ spec: policyTypes: - Egress - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-ingress-prometheusadapter + namespace: monitoring + labels: + app.kubernetes.io/name: prometheus +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/component: metrics-adapter + app.kubernetes.io/name: prometheus-adapter + app.kubernetes.io/part-of: kube-prometheus + ports: + - port: 9090 + protocol: TCP +--- diff --git a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl index 7cc8e2444..acaa1bd64 100644 --- a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl @@ -2,6 +2,8 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. 
+{{- $monitoringType := .spec.distribution.modules.monitoring.type }} + # source: https://github.com/prometheus-operator/kube-prometheus/blob/main/manifests/prometheus-networkPolicy.yaml apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -44,3 +46,117 @@ spec: policyTypes: - Egress - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-minio + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + egress: + - to: + - podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +{{- if eq $monitoringType "mimir" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-mimir + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: k8s + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/component: gateway + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: mimir-distributed + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-kubeapiserver + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + egress: + - ports: + - port: 6443 + protocol: TCP + - port: 8405 + protocol: TCP +--- +{{- if eq .spec.distribution.modules.monitoring.mimir.backend "minio" }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: prometheus-egress-miniologging + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: logging + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy 
+metadata: + name: prometheus-egress-miniomonitoring + namespace: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: prometheus + app.kubernetes.io/instance: k8s + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: monitoring + podSelector: + matchLabels: + app: minio + ports: + - port: 9000 + protocol: TCP +--- +{{- end }} +{{- end }} + From 0f3ba97abc1070d0d6d0093a6fa5834a9d833b8a Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 12 Nov 2024 14:36:54 +0100 Subject: [PATCH 025/160] fix(network-policies): remove condition from prometheus include --- .../manifests/monitoring/policies/kustomization.yaml.tpl | 2 -- 1 file changed, 2 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl index 4c7d44a53..0fa4c4391 100644 --- a/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/kustomization.yaml.tpl @@ -19,8 +19,6 @@ resources: - alertmanager.yaml - prometheus-adapter.yaml - grafana.yaml -{{- end }} -{{- if eq $monitoringType "prometheus" }} - prometheus.yaml {{- end }} {{- if eq $monitoringType "mimir" }} From 11724b2501ba2da297440be4348ac08583ba57b3 Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Wed, 13 Nov 2024 17:59:07 +0100 Subject: [PATCH 026/160] feat(schemas): add loki migration to tsdb and schema v13 section --- schemas/public/ekscluster-kfd-v1alpha2.json | 21 +++++++++++++++++++ .../public/kfddistribution-kfd-v1alpha2.json | 21 +++++++++++++++++++ schemas/public/onpremises-kfd-v1alpha2.json | 21 +++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 49e3379dc..75eacc6dc 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ 
b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1533,6 +1533,27 @@ }, "resources": { "$ref": "#/$defs/Types.KubeResources" + }, + "tsdbSchemav13Migration": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" + }, + "schemaConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + } + } + } + } } } }, diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 3e4451b36..52f483bc9 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -600,6 +600,27 @@ }, "resources": { "$ref": "#/$defs/Types.KubeResources" + }, + "tsdbSchemav13Migration": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" + }, + "schemaConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + } + } + } + } } } }, diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index cc808f71e..d2c786ad5 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1184,6 +1184,27 @@ }, "resources": { "$ref": "#/$defs/Types.KubeResources" + }, + "tsdbSchemav13Migration": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" + 
}, + "schemaConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + } + } + } + } } } }, From 91bbc7beb5396554d1c4e90da7aea7f3d4e9d608 Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Wed, 13 Nov 2024 18:13:59 +0100 Subject: [PATCH 027/160] feat(templates): update loki-config.tpl to allow new tsdb v13 schema conf --- .../manifests/logging/patches/loki-config.yaml.tpl | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl index f1851f754..b76f9316d 100644 --- a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl +++ b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl @@ -77,6 +77,15 @@ schema_config: object_store: s3 schema: v11 store: boltdb-shipper +{{- if .spec.distribution.modules.logging.loki.tsdbSchemav13Migration.enabled }} + - from: {{ .spec.distribution.modules.logging.loki.tsdbSchemav13Migration.tsdbStartDate }} + index: + period: 24h + prefix: index_ + object_store: s3 + schema: v13 + store: tsdb +{{- end }} server: http_listen_port: 3100 storage_config: From df79fd787e041c4e339258e60b29563361683ab7 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Thu, 14 Nov 2024 14:30:39 +0100 Subject: [PATCH 028/160] feat(tracing): bump to v1.1.0 - bump tracing version to v1.1.0. - drop deprecated fields from tempo's configuration file template. - align nomenclature in minio's root credentials to follow the breaking change introduced in the release of the module. 
--- kfd.yaml | 2 +- .../distribution/manifests/tracing/patches/minio.root.env.tpl | 4 ++-- .../distribution/manifests/tracing/patches/tempo.yaml.tpl | 3 +-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index 576f2bd1c..357ab8594 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -12,7 +12,7 @@ modules: monitoring: v3.2.0 opa: v1.12.0 networking: v1.17.0 - tracing: v1.0.3 + tracing: v1.1.0 kubernetes: eks: version: 1.29 diff --git a/templates/distribution/manifests/tracing/patches/minio.root.env.tpl b/templates/distribution/manifests/tracing/patches/minio.root.env.tpl index a63a82680..333ad7378 100644 --- a/templates/distribution/manifests/tracing/patches/minio.root.env.tpl +++ b/templates/distribution/manifests/tracing/patches/minio.root.env.tpl @@ -1,2 +1,2 @@ -ROOT_PASSWORD={{ .spec.distribution.modules.tracing.minio.rootUser.password }} -ROOT_USER={{ .spec.distribution.modules.tracing.minio.rootUser.username }} +rootPassword={{ .spec.distribution.modules.tracing.minio.rootUser.password }} +rootUser={{ .spec.distribution.modules.tracing.minio.rootUser.username }} diff --git a/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl b/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl index b1cd52196..99ac37a25 100644 --- a/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/patches/tempo.yaml.tpl @@ -86,13 +86,12 @@ querier: trace_by_id: query_timeout: 10s query_frontend: + max_outstanding_per_tenant: 2000 max_retries: 2 search: concurrent_jobs: 1000 target_bytes_per_job: 104857600 trace_by_id: - hedge_requests_at: 2s - hedge_requests_up_to: 2 query_shards: 50 server: grpc_server_max_recv_msg_size: 4194304 From b6c8df3f6e3f83eedbdaeb32b7e740e48d379290 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 14 Nov 2024 17:18:59 +0100 Subject: [PATCH 029/160] feat(network-policies): update name,add labels on auth --- 
.../auth/policies/acme-http-solver.yaml.tpl | 8 ++- .../manifests/auth/policies/common.yaml.tpl | 6 +- .../manifests/auth/policies/pomerium.yaml.tpl | 56 +++++++++++++------ .../auth/policies/prometheus-metrics.yaml.tpl | 6 +- 4 files changed, 55 insertions(+), 21 deletions(-) diff --git a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl index fc12594de..c714cc9b1 100644 --- a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl @@ -6,8 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: acmehttpsolver-ingress-nginxingresscontroller + name: acme-httpsolver-ingress-nginx namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: matchLabels: @@ -21,7 +23,11 @@ spec: kubernetes.io/metadata.name: ingress-nginx podSelector: matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} app: ingress-nginx +{{- end }} ports: - port: 8089 protocol: TCP diff --git a/templates/distribution/manifests/auth/policies/common.yaml.tpl b/templates/distribution/manifests/auth/policies/common.yaml.tpl index f8f0ada65..f83763d25 100644 --- a/templates/distribution/manifests/auth/policies/common.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/common.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: {} policyTypes: @@ -17,8 +19,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: all-egress-dns + name: all-egress-kube-dns namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: matchLabels: {} diff --git 
a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index 16c6647ce..6b85dae35 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -6,8 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-ingress-nginxingresscontroller + name: pomerium-ingress-nginx namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Ingress @@ -31,6 +33,8 @@ kind: NetworkPolicy metadata: name: pomerium-egress-https namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -45,8 +49,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-grafana - namespace: pomerium + name: pomerium-egress-grafana + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -68,8 +74,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-prometheus - namespace: pomerium + name: pomerium-egress-prometheus + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -91,8 +99,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-alertmanager - namespace: pomerium + name: pomerium-egress-alert-manager + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -114,8 +124,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-forecastle - namespace: pomerium + name: pomerium-egress-forecastle + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -137,8 +149,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy 
metadata: - name: pomerium-egress-gatekeeperpolicymanager - namespace: pomerium + name: pomerium-egress-gpm + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -160,8 +174,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-hubbleui - namespace: pomerium + name: pomerium-egress-hubble-ui + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -183,8 +199,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-opensearchdashboard - namespace: pomerium + name: pomerium-egress-opensearch-dashboard + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -208,8 +226,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-miniologging + name: pomerium-egress-minio-logging namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress @@ -231,8 +251,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-miniotracing + name: pomerium-egress-minio-tracing namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl index 0e107a7a7..d07701c1b 100644 --- a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl @@ -6,8 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-ingress-prometheusmetrics - namespace: pomerium + name: pomerium-ingress-prometheus-metrics + namespace: pomerium + labels: + cluster.kfd.sighup.io/auth-provider-type: sso spec: 
podSelector: matchLabels: From 59e983a41b85c148b3c9a3d0bf4d16f5a72ad716 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 14 Nov 2024 17:19:17 +0100 Subject: [PATCH 030/160] feat(network-policies): update name,add labels on ingress --- .../policies/cert-manager/cert-manager.yaml.tpl | 16 ++++++++++++---- .../policies/cert-manager/common.yaml.tpl | 6 +++++- .../cert-manager/prometheus-metrics.yaml.tpl | 4 +++- .../policies/ingress-nginx/common.yaml.tpl | 6 +++++- .../policies/ingress-nginx/forecastle.yaml.tpl | 8 ++++++-- .../nginx-ingress-controller.yaml.tpl | 8 ++++++-- .../ingress-nginx/prometheus-metrics.yaml.tpl | 4 +++- 7 files changed, 40 insertions(+), 12 deletions(-) diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl index ebe36547d..a56236684 100644 --- a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl @@ -7,8 +7,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: certmanager-egress-kubeapiserver + name: cert-manager-egress-kube-apiserver namespace: cert-manager + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: @@ -23,8 +25,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: certmanagerwebhook-ingress-kubeapiserver + name: cert-manager-webhook-ingress-kube-apiserver namespace: cert-manager + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: @@ -40,8 +44,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: certmanager-egress-https + name: cert-manager-egress-https namespace: cert-manager + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: @@ -60,8 +66,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: 
NetworkPolicy metadata: - name: acmehttpsolver-ingress-letsencrypt + name: acme-http-solver-ingress-lets-encrypt namespace: pomerium + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl index d85dea771..5a88dfc1a 100644 --- a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl @@ -7,6 +7,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: cert-manager + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: {} policyTypes: @@ -16,8 +18,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: all-egress-dns + name: all-egress-kube-dns namespace: cert-manager + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl index 3141cf9e8..c12250d6b 100644 --- a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl @@ -6,8 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: certmanager-ingress-prometheusmetrics + name: cert-manager-ingress-prometheus-metrics namespace: cert-manager + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl index 39daf71e7..34d8dc144 100644 --- 
a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: {} policyTypes: @@ -17,8 +19,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: all-egress-dns + name: all-egress-kube-dns namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl index 73c697f20..7d48f92a5 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl @@ -5,8 +5,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: forecastle-ingress-nginxingresscontroller + name: forecastle-ingress-nginx namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: @@ -39,8 +41,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: forecastle-egress-kubeapiserver + name: forecastle-egress-kube-apiserver namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl index a20ff2696..59a8d404e 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl 
@@ -5,8 +5,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: nginxingresscontroller-egress-all + name: nginx-egress-all namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: @@ -23,8 +25,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: all-ingress-nginxingresscontroller + name: all-ingress-nginx namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl index 7f91521f6..8caece613 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl @@ -5,8 +5,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: nginx-ingress-prometheusmetrics + name: nginx-ingress-prometheus-metrics namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: matchLabels: From d3ffc4a71926ea563d715c5b9418ac664340f46e Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 14 Nov 2024 17:19:49 +0100 Subject: [PATCH 031/160] feat(network-policies): update name,add labels on logging --- .../logging/policies/common.yaml.tpl | 6 +++++- .../logging/policies/configs.yaml.tpl | 4 ++-- .../logging/policies/fluentbit.yaml.tpl | 8 ++++---- .../logging/policies/fluentd.yaml.tpl | 12 +++++++----- .../policies/logging-operator.yaml.tpl | 4 ++-- .../manifests/logging/policies/loki.yaml.tpl | 19 +++++++++++-------- .../manifests/logging/policies/minio.yaml.tpl | 16 ++++++++-------- .../policies/opensearch-dashboards.yaml.tpl | 15 +++++++++++---- .../logging/policies/opensearch.yaml.tpl | 18 ++++++++++++++---- 9 files changed, 64 
insertions(+), 38 deletions(-) diff --git a/templates/distribution/manifests/logging/policies/common.yaml.tpl b/templates/distribution/manifests/logging/policies/common.yaml.tpl index 12cc3830b..6fd8ddaad 100644 --- a/templates/distribution/manifests/logging/policies/common.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/common.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: logging + labels: + cluster.kfd.sighup.io/module: logging spec: podSelector: {} policyTypes: @@ -17,8 +19,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: allow-dns-access + name: all-egress-kube-dns namespace: logging + labels: + cluster.kfd.sighup.io/module: logging spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/logging/policies/configs.yaml.tpl b/templates/distribution/manifests/logging/policies/configs.yaml.tpl index aa35827c7..05ff5e2d5 100644 --- a/templates/distribution/manifests/logging/policies/configs.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/configs.yaml.tpl @@ -6,10 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: event-tailer-egress-apiserver + name: event-tailer-egress-kube-apiserver namespace: logging labels: - app.kubernetes.io/name: event-tailer + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl index 9213c688a..48f6095a0 100644 --- a/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/fluentbit.yaml.tpl @@ -9,7 +9,7 @@ metadata: name: fluentbit-egress-fluentd namespace: logging labels: - app.kubernetes.io/name: fluentbit + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress @@ -25,10 +25,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy 
metadata: - name: fluentbit-egress-apiserver + name: fluentbit-egress-kube-apiserver namespace: logging labels: - app.kubernetes.io/name: fluentbit + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress @@ -46,7 +46,7 @@ metadata: name: fluentbit-ingress-prometheus-metrics namespace: logging labels: - app.kubernetes.io/name: fluentbit + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Ingress diff --git a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl index 6faba450c..10fcd9437 100644 --- a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl @@ -9,7 +9,7 @@ metadata: name: fluentd-ingress-fluentbit namespace: logging labels: - app.kubernetes.io/name: fluentd + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Ingress @@ -34,7 +34,7 @@ metadata: name: fluentd-egress-minio namespace: logging labels: - app.kubernetes.io/name: fluentd + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress @@ -59,7 +59,7 @@ metadata: name: fluentd-ingress-prometheus-metrics namespace: logging labels: - app.kubernetes.io/name: fluentd + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Ingress @@ -85,7 +85,8 @@ metadata: name: fluentd-egress-opensearch namespace: logging labels: - app.kubernetes.io/name: fluentd + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Egress @@ -109,7 +110,8 @@ metadata: name: fluentd-egress-loki namespace: logging labels: - app.kubernetes.io/name: fluentd + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl index 7aa37e605..bc0a2cccd 100644 --- 
a/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/logging-operator.yaml.tpl @@ -6,10 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: logging-operator-egress-apiserver + name: logging-operator-egress-kube-apiserver namespace: logging labels: - app.kubernetes.io/name: logging-operator + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl index 3740d2193..0afe2222c 100644 --- a/templates/distribution/manifests/logging/policies/loki.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl @@ -9,7 +9,8 @@ metadata: name: loki-distributed-ingress-fluentd namespace: logging labels: - app.kubernetes.io/name: loki-distributed + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki spec: policyTypes: - Ingress @@ -37,7 +38,8 @@ metadata: name: loki-distributed-ingress-grafana namespace: logging labels: - app.kubernetes.io/name: loki-distributed + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki spec: policyTypes: - Ingress @@ -56,7 +58,6 @@ spec: ports: - port: 8080 protocol: TCP - --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -64,7 +65,8 @@ metadata: name: loki-distributed-ingress-prometheus-metrics namespace: logging labels: - app.kubernetes.io/name: loki-distributed + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki spec: policyTypes: - Ingress @@ -82,7 +84,6 @@ spec: podSelector: matchLabels: app.kubernetes.io/name: prometheus - --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -90,7 +91,8 @@ metadata: name: loki-distributed-discovery namespace: logging labels: - app.kubernetes.io/name: loki-distributed + cluster.kfd.sighup.io/module: logging + 
cluster.kfd.sighup.io/logging-type: loki spec: policyTypes: - Ingress @@ -135,7 +137,8 @@ metadata: name: loki-distributed-egress-minio namespace: logging labels: - app.kubernetes.io/name: loki-distributed + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: loki spec: policyTypes: - Egress @@ -152,4 +155,4 @@ spec: kubernetes.io/metadata.name: logging ports: - port: 9000 - protocol: TCP \ No newline at end of file + protocol: TCP diff --git a/templates/distribution/manifests/logging/policies/minio.yaml.tpl b/templates/distribution/manifests/logging/policies/minio.yaml.tpl index d70b5c3a8..c979320e8 100644 --- a/templates/distribution/manifests/logging/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/minio.yaml.tpl @@ -9,7 +9,7 @@ metadata: name: minio-ingress-namespace namespace: logging labels: - app: minio + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Ingress @@ -40,10 +40,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: minio-buckets-setup-egress-apiserver + name: minio-buckets-setup-egress-kube-apiserver namespace: logging labels: - app: minio-logging-buckets-setup + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress @@ -61,7 +61,7 @@ metadata: name: minio-buckets-setup-egress-minio namespace: logging labels: - app: minio-logging-buckets-setup + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress @@ -87,7 +87,7 @@ metadata: name: minio-ingress-prometheus-metrics namespace: logging labels: - app: minio + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Ingress @@ -112,7 +112,7 @@ metadata: name: minio-egress-https namespace: logging labels: - app: minio + cluster.kfd.sighup.io/module: logging spec: policyTypes: - Egress @@ -127,10 +127,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: minio-ingress-nginxingresscontroller + name: minio-ingress-nginx namespace: logging labels: - app: minio 
+ cluster.kfd.sighup.io/module: logging spec: policyTypes: - Ingress diff --git a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl index c919cb4a8..0b10c7bce 100644 --- a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl @@ -9,7 +9,8 @@ metadata: name: opensearch-dashboards-egress-opensearch namespace: logging labels: - app: opensearch-dashboards + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Egress @@ -31,8 +32,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: opensearchdashboards-ingress-jobs + name: opensearch-dashboards-ingress-jobs namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress @@ -53,8 +57,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: opensearchdashboards-ingress-nginx - namespace: logging + name: opensearch-dashboards-ingress-nginx + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress diff --git a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl index 8641db6f5..2ddcd18b2 100644 --- a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl @@ -9,7 +9,8 @@ metadata: name: opensearch-ingress-dashboards namespace: logging labels: - app.kubernetes.io/name: opensearch + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress @@ -34,7 +35,8 @@ metadata: name: 
opensearch-ingress-fluentd namespace: logging labels: - app.kubernetes.io/name: opensearch + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress @@ -59,7 +61,8 @@ metadata: name: opensearch-discovery namespace: logging labels: - app.kubernetes.io/name: opensearch + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress @@ -96,7 +99,8 @@ metadata: name: opensearch-ingress-prometheus-metrics namespace: logging labels: - app.kubernetes.io/name: opensearch + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress @@ -120,6 +124,9 @@ kind: NetworkPolicy metadata: name: opensearch-ingress-jobs namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Ingress @@ -141,6 +148,9 @@ kind: NetworkPolicy metadata: name: jobs-egress-opensearch namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Egress From f21d06a5fb263656eac12832f221f97fed512787 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 14 Nov 2024 17:20:03 +0100 Subject: [PATCH 032/160] feat(network-policies): update name,add labels on monitoring --- .../monitoring/policies/alertmanager.yaml.tpl | 2 ++ .../policies/blackbox-exporter.yaml.tpl | 2 ++ .../monitoring/policies/common.yaml.tpl | 6 +++- .../monitoring/policies/grafana.yaml.tpl | 30 ++++++++++++++++++ .../monitoring/policies/ingress.yaml.tpl | 12 +++++-- .../policies/kube-state-metrics.yaml.tpl | 2 ++ .../monitoring/policies/mimir.yaml.tpl | 31 +++++++++++++------ .../monitoring/policies/minio.yaml.tpl | 2 +- .../policies/node-exporter.yaml.tpl | 2 ++ .../policies/prometheus-adapter.yaml.tpl | 6 ++-- .../policies/prometheus-operator.yaml.tpl | 2 ++ .../monitoring/policies/prometheus.yaml.tpl | 6 +++- 
.../policies/x509-exporter.yaml.tpl | 6 ++-- 13 files changed, 89 insertions(+), 20 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl index 3070f3092..2ed8a7215 100644 --- a/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/alertmanager.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: alertmanager-main namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} diff --git a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl index 165f7d94b..c8b4745c7 100644 --- a/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/blackbox-exporter.yaml.tpl @@ -9,6 +9,8 @@ kind: NetworkPolicy metadata: name: blackbox-exporter namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} diff --git a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl index fe3565120..9ca8ec757 100644 --- a/templates/distribution/manifests/monitoring/policies/common.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/common.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: podSelector: {} policyTypes: @@ -17,8 +19,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: allow-dns-access + name: all-egress-kube-dns namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: podSelector: matchLabels: {} diff --git 
a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl index c17ff307b..5510e600d 100644 --- a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl @@ -10,6 +10,8 @@ kind: NetworkPolicy metadata: name: grafana namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} @@ -32,9 +34,37 @@ spec: --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy +metadata: + name: grafana-egress-tempo-gateway + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: grafana + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: tracing + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: gateway + ports: + - port: 8080 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy metadata: name: grafana-ingress-nginx namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: policyTypes: - Ingress diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl index 58f17e36a..c1d6ad1ac 100644 --- a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl @@ -6,8 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: grafana-ingress-nginxingresscontroller + name: grafana-ingress-nginx namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: podSelector: matchLabels: @@ -52,8 +54,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: prometheus-ingress-nginxingresscontroller + name: 
prometheus-ingress-nginx namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: podSelector: matchLabels: @@ -99,8 +103,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: alertmanager-ingress-nginxingresscontroller + name: alertmanager-ingress-nginx namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl index b38a925b8..0851cf907 100644 --- a/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/kube-state-metrics.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: kube-state-metrics namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} diff --git a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl index 28657ab04..77dd0149d 100644 --- a/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/mimir.yaml.tpl @@ -9,7 +9,8 @@ metadata: name: mimir-distributed-ingress-prometheus-metrics namespace: monitoring labels: - app.kubernetes.io/name: mimir + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Ingress @@ -35,7 +36,8 @@ metadata: name: mimir-distributed-discovery namespace: monitoring labels: - app.kubernetes.io/name: mimir + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Ingress @@ -77,8 +79,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: mimirgateway-ingress-grafana + name: mimir-gateway-ingress-grafana namespace: monitoring + labels: + 
cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Ingress @@ -100,8 +105,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: mimirquerier-egress-all - namespace: monitoring + name: mimir-querier-egress-https + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Egress @@ -118,8 +126,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: mimiringester-egress-all - namespace: monitoring + name: mimir-ingester-egress-https + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Egress @@ -140,7 +151,8 @@ metadata: name: mimir-distributed-egress-minio namespace: monitoring labels: - app.kubernetes.io/name: mimir + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Egress @@ -166,7 +178,8 @@ metadata: name: mimir-distributed-egress-all namespace: monitoring labels: - app.kubernetes.io/name: mimir + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-type: mimir spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl index 020f31698..14217ef2c 100644 --- a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl @@ -40,7 +40,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: minio-buckets-setup-egress-apiserver + name: minio-buckets-setup-egress-kube-apiserver namespace: monitoring labels: app: minio-monitoring-buckets-setup diff --git a/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl 
b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl index 0e8649e62..4b06c7ece 100644 --- a/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/node-exporter.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: node-exporter namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl index 1add6a429..7f26d2dd5 100644 --- a/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/prometheus-adapter.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: prometheus-adapter namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} @@ -25,10 +27,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: prometheus-ingress-prometheusadapter + name: prometheus-ingress-prometheus-adapter namespace: monitoring labels: - app.kubernetes.io/name: prometheus + cluster.kfd.sighup.io/module: monitoring spec: policyTypes: - Ingress diff --git a/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl index efea3a9cf..d33974f30 100644 --- a/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/prometheus-operator.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: prometheus-operator namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: egress: - {} diff --git a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl 
b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl index acaa1bd64..89365bb22 100644 --- a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl @@ -115,6 +115,8 @@ kind: NetworkPolicy metadata: name: prometheus-egress-miniologging namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: policyTypes: - Egress @@ -136,8 +138,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: prometheus-egress-miniomonitoring + name: prometheus-egress-minio-monitoring namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl index 519ecc98b..a89c3f207 100644 --- a/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/x509-exporter.yaml.tpl @@ -6,10 +6,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: x509-exporter-egress-apiserver + name: x509-exporter-egress-kube-apiserver namespace: monitoring labels: - app: x509-certificate-exporter + cluster.kfd.sighup.io/module: monitoring spec: policyTypes: - Egress @@ -27,7 +27,7 @@ metadata: name: x509-exporter-ingress-prometheus-metrics namespace: monitoring labels: - app: x509-certificate-exporter + cluster.kfd.sighup.io/module: monitoring spec: policyTypes: - Ingress From bcc4f587aeffe26d856c1c3e4c34c24c993f2b5e Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 14 Nov 2024 17:20:22 +0100 Subject: [PATCH 033/160] feat(network-policies): update name,add labels on opa --- .../opa/policies/gatekeeper/audit.yaml.tpl | 5 +++- .../opa/policies/gatekeeper/common.yaml.tpl | 4 +++ .../gatekeeper/controller-manager.yaml.tpl | 5 +++- 
.../gatekeeper-policy-manager.yaml.tpl | 10 +++++-- .../gatekeeper/prometheus-metrics.yaml.tpl | 5 +++- .../opa/policies/kyverno/common.yaml.tpl | 4 +++ .../opa/policies/kyverno/kyverno.yaml.tpl | 30 +++++++++++++++---- 7 files changed, 52 insertions(+), 11 deletions(-) diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl index 53cdeb2c5..10f8a1e52 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/audit.yaml.tpl @@ -5,8 +5,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: auditcontroller-egress-apiserver + name: audit-controller-egress-kube-apiserver namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl index 99acf05f9..eeda4d0d5 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl @@ -8,6 +8,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa spec: podSelector: {} policyTypes: @@ -19,6 +21,8 @@ kind: NetworkPolicy metadata: name: all-egress-dns namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl index 81371c828..95cc5ec22 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl +++ 
b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl @@ -21,8 +21,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: controllermanager-ingress-kubeapiserver + name: controller-manager-ingress-kube-apiserver namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl index 2e894493b..84557ba3e 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/gatekeeper-policy-manager.yaml.tpl @@ -5,8 +5,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: gatekeeperpolicymanager-egress-kubeapiserver + name: gpm-egress-kube-apiserver namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: @@ -21,8 +24,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: gatekeeperpolicymanager-ingress-gatekeeper + name: gpm-ingress-pomerium namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl index 389e63871..44cd7a68b 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/prometheus-metrics.yaml.tpl @@ -5,8 +5,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: 
gatekeeper-ingress-prometheusmetrics + name: gatekeeper-ingress-prometheus-metrics namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl index a620ebf7e..fed35d22d 100644 --- a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl @@ -7,6 +7,8 @@ kind: NetworkPolicy metadata: name: deny-all namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa spec: podSelector: {} policyTypes: @@ -18,6 +20,8 @@ kind: NetworkPolicy metadata: name: all-egress-dns namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl index 4246589b1..ff8c06b24 100644 --- a/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/kyverno/kyverno.yaml.tpl @@ -5,8 +5,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: kyvernoadmission-egress-kubeapiserver + name: kyverno-admission-egress-kube-apiserver namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchLabels: @@ -21,8 +24,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: kyvernoadmission-ingress-nodes + name: kyverno-admission-ingress-nodes namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchLabels: @@ -37,8 +43,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: 
kyvernobackground-egress-kubeapiserver + name: kyverno-background-egress-kube-apiserver namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchLabels: @@ -53,8 +62,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: kyvernoreports-egress-kubeapiserver + name: kyverno-reports-egress-kube-apiserver namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchLabels: @@ -69,8 +81,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: kyvernocleanup-egress-kubeapiserver + name: kyverno-cleanup-egress-kube-apiserver namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchLabels: @@ -85,8 +100,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: kyvernocleanupreports-egress-kubeapiserver + name: kyverno-cleanup-reports-egress-kube-apiserver namespace: kyverno + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchExpressions: From 2c3111135ccec9df01508ff5cc2aeeb74eb2a562 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 14 Nov 2024 17:20:39 +0100 Subject: [PATCH 034/160] feat(network-policies): update name,add labels on tracing --- .../tracing/policies/common.yaml.tpl | 6 +- .../manifests/tracing/policies/minio.yaml.tpl | 14 +- .../manifests/tracing/policies/tempo.yaml.tpl | 125 ++++++++++++++++-- 3 files changed, 129 insertions(+), 16 deletions(-) diff --git a/templates/distribution/manifests/tracing/policies/common.yaml.tpl b/templates/distribution/manifests/tracing/policies/common.yaml.tpl index a61acfccc..6727129eb 100644 --- a/templates/distribution/manifests/tracing/policies/common.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/common.yaml.tpl @@ -8,6 +8,8 @@ kind: 
NetworkPolicy metadata: name: deny-all namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing spec: podSelector: {} policyTypes: @@ -17,8 +19,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: allow-dns-access + name: all-egress-kube-dns namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl index eed3d3315..be0c1ea31 100644 --- a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: minio-ingress-namespace namespace: tracing labels: - app: minio + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Ingress @@ -35,10 +35,10 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: minio-buckets-setup-egress-apiserver + name: minio-buckets-setup-egress-kube-apiserver namespace: tracing labels: - app: minio-tracing-buckets-setup + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Egress @@ -56,7 +56,7 @@ metadata: name: minio-buckets-setup-egress-minio namespace: tracing labels: - app: minio-tracing-buckets-setup + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Egress @@ -81,7 +81,7 @@ metadata: name: minio-ingress-prometheus-metrics namespace: tracing labels: - app: minio + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Ingress @@ -106,7 +106,7 @@ metadata: name: minio-ingress-pomerium namespace: tracing labels: - app.kubernetes.io/name: minio-tracing + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Ingress @@ -152,7 +152,7 @@ metadata: name: minio-egress-https namespace: tracing labels: - app: minio + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl 
b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl index 9590559e8..3756a9f4f 100644 --- a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: tempo-distributed-discovery namespace: tracing labels: - app.kubernetes.io/name: tempo + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Ingress @@ -46,17 +46,18 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: tempo-distributed-ingress-grafana + name: tempo-gateway-ingress-grafana namespace: tracing labels: - app.kubernetes.io/name: tempo + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Ingress podSelector: matchLabels: - app.kubernetes.io/name: tempo app.kubernetes.io/component: gateway + app.kubernetes.io/name: tempo + app.kubernetes.io/instance: tempo-distributed ingress: - from: - namespaceSelector: @@ -64,11 +65,99 @@ spec: kubernetes.io/metadata.name: monitoring podSelector: matchLabels: + app.kubernetes.io/component: grafana app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: kube-prometheus ports: - port: 8080 protocol: TCP - +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-egress-tempo-distributor + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: {} + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: distributor + ports: + - port: 4317 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempo-distributor-ingress-traces + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: distributor + ingress: + - ports: + - port: 4317 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 
+kind: NetworkPolicy +metadata: + name: tempocomponents-egress-memcached + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: tempo-distributed + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: memcached + ports: + - port: 11211 + protocol: TCP +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: memcached-ingress-querier + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: memcached + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: querier + ports: + - port: 11211 + protocol: TCP --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -76,7 +165,7 @@ metadata: name: tempo-distributed-ingress-prometheus-metrics namespace: tracing labels: - app.kubernetes.io/name: tempo + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Ingress @@ -94,6 +183,25 @@ spec: podSelector: matchLabels: app.kubernetes.io/name: prometheus +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: tempocomponents-egress-https + namespace: tracing + labels: + cluster.kfd.sighup.io/module: tracing +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/instance: tempo-distributed + egress: + - ports: + - port: 443 + protocol: TCP {{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} --- apiVersion: networking.k8s.io/v1 @@ -102,7 +210,7 @@ metadata: name: tempo-distributed-egress-minio namespace: tracing labels: - app.kubernetes.io/name: tempo + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Egress @@ -128,7 +236,7 @@ metadata: name: tempo-distributed-egress-all namespace: 
tracing labels: - app.kubernetes.io/name: tempo + cluster.kfd.sighup.io/module: tracing spec: policyTypes: - Egress @@ -138,3 +246,4 @@ spec: egress: - {} {{- end }} +--- From fdaf8994d2db4ed3336a9d4ad38940c386bc25c5 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Thu, 14 Nov 2024 17:47:08 +0100 Subject: [PATCH 035/160] fix(templates): fix ingress tlsSecret name generation - Fix bug introduced in #294 that broke templates for ingresses. - Change the logic to generate the tlsSecret name, instead of using the package name use the prefix (the first part of the ingress hostname). This will change the tlsSecret name for the several MinIOs ingresses that we have from `minio` to `minio-ingress`, `minio-logging`, `minio-monitoring`. As a side effect Forecastle tlsSecretName will be changed too from `forecastle` to `directory`. --- templates/distribution/_helpers.tpl | 2 +- .../manifests/logging/resources/ingress-infra.yml.tpl | 4 ++-- .../manifests/monitoring/resources/ingress-infra.yml.tpl | 2 +- .../manifests/tracing/resources/ingress-infra.yml.tpl | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/templates/distribution/_helpers.tpl b/templates/distribution/_helpers.tpl index 335146356..597415e52 100644 --- a/templates/distribution/_helpers.tpl +++ b/templates/distribution/_helpers.tpl @@ -125,7 +125,7 @@ - hosts: - {{ template "ingressHost" . }} {{- if eq .spec.distribution.modules.ingress.nginx.tls.provider "certManager" }} - secretName: {{ lower .package }}-tls + secretName: {{ lower .prefix | trimSuffix "." 
}}-tls {{- end }} {{- end }} {{- end -}} diff --git a/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl index d2bb75201..83a453240 100644 --- a/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/logging/resources/ingress-infra.yml.tpl @@ -65,7 +65,7 @@ metadata: forecastle.stakater.com/icon: "https://min.io/resources/img/logo/MINIO_Bird.png" {{ if not .spec.distribution.modules.logging.overrides.ingresses.minio.disableAuth }}{{ template "ingressAuth" . }}{{ end }} {{ template "certManagerClusterIssuer" . }} - + {{ if and (not .spec.distribution.modules.logging.overrides.ingresses.minio.disableAuth) (eq .spec.distribution.modules.auth.provider.type "sso") }} name: minio-logging namespace: pomerium @@ -93,4 +93,4 @@ spec: port: name: http {{ end }} -{{- template "ingressTls" (dict "module" "logging" "package" "minio-logging" "prefix" "minio-logging." "spec" .spec) }} +{{- template "ingressTls" (dict "module" "logging" "package" "minio" "prefix" "minio-logging." "spec" .spec) }} diff --git a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl index 0ea55cad5..3d4f1fbe2 100644 --- a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl @@ -181,7 +181,7 @@ spec: port: name: http {{ end }} -{{- template "ingressTls" (dict "module" "monitoring" "package" "minio-monitoring" "prefix" "minio-monitoring." "spec" .spec) }} +{{- template "ingressTls" (dict "module" "monitoring" "package" "minio" "prefix" "minio-monitoring." 
"spec" .spec) }} {{- end }} {{- end }} diff --git a/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl index 362180241..adff73665 100644 --- a/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/tracing/resources/ingress-infra.yml.tpl @@ -46,4 +46,4 @@ spec: port: name: http {{ end }} -{{- template "ingressTls" (dict "module" "tracing" "package" "minio-tracing" "prefix" "minio-tracing." "spec" .spec) }} +{{- template "ingressTls" (dict "module" "tracing" "package" "minio" "prefix" "minio-tracing." "spec" .spec) }} From 06b97bb87cd83f98bea2794dca7636c33b81d2a1 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 14 Nov 2024 17:57:37 +0100 Subject: [PATCH 036/160] feat(network-policies): add common labels --- .../auth/policies/acme-http-solver.yaml.tpl | 1 + .../manifests/auth/policies/common.yaml.tpl | 1 + .../manifests/auth/policies/pomerium.yaml.tpl | 11 ++++++++ .../auth/policies/prometheus-metrics.yaml.tpl | 1 + .../cert-manager/cert-manager.yaml.tpl | 4 +++ .../policies/cert-manager/common.yaml.tpl | 2 ++ .../cert-manager/prometheus-metrics.yaml.tpl | 1 + .../policies/ingress-nginx/common.yaml.tpl | 2 ++ .../ingress-nginx/forecastle.yaml.tpl | 2 ++ .../nginx-ingress-controller.yaml.tpl | 2 ++ .../ingress-nginx/prometheus-metrics.yaml.tpl | 1 + .../opa/policies/gatekeeper/common.yaml.tpl | 2 ++ .../gatekeeper/controller-manager.yaml.tpl | 3 ++ .../opa/policies/kyverno/common.yaml.tpl | 28 ++++++++++--------- 14 files changed, 48 insertions(+), 13 deletions(-) diff --git a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl index c714cc9b1..0f7a8a246 100644 --- a/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl +++ 
b/templates/distribution/manifests/auth/policies/acme-http-solver.yaml.tpl @@ -9,6 +9,7 @@ metadata: name: acme-httpsolver-ingress-nginx namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: diff --git a/templates/distribution/manifests/auth/policies/common.yaml.tpl b/templates/distribution/manifests/auth/policies/common.yaml.tpl index f83763d25..dfe83bd10 100644 --- a/templates/distribution/manifests/auth/policies/common.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/common.yaml.tpl @@ -9,6 +9,7 @@ metadata: name: deny-all namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: {} diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index 6b85dae35..caa2f1522 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -9,6 +9,7 @@ metadata: name: pomerium-ingress-nginx namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -34,6 +35,7 @@ metadata: name: pomerium-egress-https namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -52,6 +54,7 @@ metadata: name: pomerium-egress-grafana namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -77,6 +80,7 @@ metadata: name: pomerium-egress-prometheus namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -102,6 +106,7 @@ metadata: name: pomerium-egress-alert-manager namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: 
sso spec: policyTypes: @@ -127,6 +132,7 @@ metadata: name: pomerium-egress-forecastle namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -152,6 +158,7 @@ metadata: name: pomerium-egress-gpm namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -177,6 +184,7 @@ metadata: name: pomerium-egress-hubble-ui namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -202,6 +210,7 @@ metadata: name: pomerium-egress-opensearch-dashboard namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -229,6 +238,7 @@ metadata: name: pomerium-egress-minio-logging namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: @@ -254,6 +264,7 @@ metadata: name: pomerium-egress-minio-tracing namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: policyTypes: diff --git a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl index d07701c1b..355ca48dd 100644 --- a/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/prometheus-metrics.yaml.tpl @@ -9,6 +9,7 @@ metadata: name: pomerium-ingress-prometheus-metrics namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl index a56236684..bbc937c2b 100644 --- 
a/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/cert-manager.yaml.tpl @@ -10,6 +10,7 @@ metadata: name: cert-manager-egress-kube-apiserver namespace: cert-manager labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: @@ -28,6 +29,7 @@ metadata: name: cert-manager-webhook-ingress-kube-apiserver namespace: cert-manager labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: @@ -47,6 +49,7 @@ metadata: name: cert-manager-egress-https namespace: cert-manager labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: @@ -69,6 +72,7 @@ metadata: name: acme-http-solver-ingress-lets-encrypt namespace: pomerium labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl index 5a88dfc1a..963b7db18 100644 --- a/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/common.yaml.tpl @@ -8,6 +8,7 @@ metadata: name: deny-all namespace: cert-manager labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: {} @@ -21,6 +22,7 @@ metadata: name: all-egress-kube-dns namespace: cert-manager labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl index c12250d6b..c329f39e5 100644 --- 
a/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/cert-manager/prometheus-metrics.yaml.tpl @@ -9,6 +9,7 @@ metadata: name: cert-manager-ingress-prometheus-metrics namespace: cert-manager labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl index 34d8dc144..d1a1f295a 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/common.yaml.tpl @@ -9,6 +9,7 @@ metadata: name: deny-all namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: {} @@ -22,6 +23,7 @@ metadata: name: all-egress-kube-dns namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl index 7d48f92a5..c223b5b3d 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/forecastle.yaml.tpl @@ -8,6 +8,7 @@ metadata: name: forecastle-ingress-nginx namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: @@ -44,6 +45,7 @@ metadata: name: forecastle-egress-kube-apiserver namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git 
a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl index 59a8d404e..164cb229c 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/nginx-ingress-controller.yaml.tpl @@ -8,6 +8,7 @@ metadata: name: nginx-egress-all namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: @@ -28,6 +29,7 @@ metadata: name: all-ingress-nginx namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl index 8caece613..f070b9d54 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/prometheus-metrics.yaml.tpl @@ -8,6 +8,7 @@ metadata: name: nginx-ingress-prometheus-metrics namespace: ingress-nginx labels: + cluster.kfd.sighup.io/module: ingress cluster.kfd.sighup.io/ingress-type: nginx spec: podSelector: diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl index eeda4d0d5..ad51c243a 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/common.yaml.tpl @@ -10,6 +10,7 @@ metadata: namespace: gatekeeper-system labels: cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: {} policyTypes: @@ -23,6 +24,7 @@ 
metadata: namespace: gatekeeper-system labels: cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: {} diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl index 95cc5ec22..1821377de 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl @@ -7,6 +7,9 @@ kind: NetworkPolicy metadata: name: controllermanager-egress-kubeapiserver namespace: gatekeeper-system + labels: + cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: gatekeeper spec: podSelector: matchLabels: diff --git a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl index fed35d22d..ccb1424a9 100644 --- a/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/kyverno/common.yaml.tpl @@ -9,11 +9,12 @@ metadata: namespace: kyverno labels: cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: {} policyTypes: - - Egress - - Ingress + - Egress + - Ingress --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -22,19 +23,20 @@ metadata: namespace: kyverno labels: cluster.kfd.sighup.io/module: opa + cluster.kfd.sighup.io/policy-type: kyverno spec: podSelector: matchLabels: {} policyTypes: - - Egress + - Egress egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: kube-system - podSelector: - matchLabels: - k8s-app: kube-dns - ports: - - protocol: UDP - port: 53 + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 From 
a1e116228d109f54aa53f2a9aae45e87bcd1d370 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Thu, 14 Nov 2024 17:58:12 +0100 Subject: [PATCH 037/160] feat(network-policies): add reducer --- rules/onpremises-kfd-v1alpha2.yaml | 8 ++++++++ templates/distribution/scripts/pre-apply.sh.tpl | 15 +++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/rules/onpremises-kfd-v1alpha2.yaml b/rules/onpremises-kfd-v1alpha2.yaml index 7afc29d61..dedf65668 100644 --- a/rules/onpremises-kfd-v1alpha2.yaml +++ b/rules/onpremises-kfd-v1alpha2.yaml @@ -13,6 +13,14 @@ kubernetes: - path: .spec.kubernetes.svcCidr immutable: true distribution: + - path: .spec.distribution.common.networkPoliciesEnabled + immutable: false + description: "changes to the network policies have been detected. This will cause the reconfiguration or deletion of the current network policies." + safe: + - to: none + reducers: + - key: distributionCommonNetworkPoliciesEnabled + lifecycle: pre-apply - path: .spec.distribution.modules.networking.type immutable: true - path: .spec.distribution.modules.logging.type diff --git a/templates/distribution/scripts/pre-apply.sh.tpl b/templates/distribution/scripts/pre-apply.sh.tpl index a6bd3369b..2c59a7331 100644 --- a/templates/distribution/scripts/pre-apply.sh.tpl +++ b/templates/distribution/scripts/pre-apply.sh.tpl @@ -17,6 +17,21 @@ vendorPath="{{ .paths.vendorPath }}" # Text generated with: https://www.patorjk.com/software/taag/#p=display&f=ANSI%20Regular&t=TRACING%20TYPE +# โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ +# โ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ +# โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ 
โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ +# โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ +# โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ + +{{- if index .reducers "distributionCommonNetworkPoliciesEnabled" }} + +{{- if eq .reducers.distributionCommonNetworkPoliciesEnabled.to false }} + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/module + echo "Network Policies deleted" +{{- end }} + +{{- end }} + # โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ # โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ # โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ From be0ba1884b99cab79d3dd856d8a3eb6cf35d88a8 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Fri, 15 Nov 2024 08:39:00 +0100 Subject: [PATCH 038/160] feat: update to the latest on-premises installer rc version --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index a5ae1d46e..d3d92ff4b 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -19,7 +19,7 @@ kubernetes: installer: v3.1.2 onpremises: version: 1.30.6 - installer: v1.30.6-rc.0 + installer: v1.30.6-rc.2 furyctlSchemas: eks: - apiVersion: kfd.sighup.io/v1alpha2 From 6b8fa4c3397bf4b5bb60034ba0a297bbac36b650 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Fri, 15 
Nov 2024 09:07:38 +0100 Subject: [PATCH 039/160] feat(network-policies): add missing labels --- .../distribution/manifests/auth/policies/pomerium.yaml.tpl | 2 ++ .../manifests/logging/policies/fluentd.yaml.tpl | 1 + .../distribution/manifests/logging/policies/minio.yaml.tpl | 6 ++++++ .../distribution/manifests/tracing/policies/minio.yaml.tpl | 6 ++++++ .../distribution/manifests/tracing/policies/tempo.yaml.tpl | 1 + 5 files changed, 16 insertions(+) diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index caa2f1522..626cb2946 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -212,6 +212,7 @@ metadata: labels: cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso + cluster.kfd.sighup.io/logging-type: opensearch spec: policyTypes: - Egress @@ -240,6 +241,7 @@ metadata: labels: cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl index 10fcd9437..48bfd6a13 100644 --- a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl @@ -35,6 +35,7 @@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/logging/policies/minio.yaml.tpl b/templates/distribution/manifests/logging/policies/minio.yaml.tpl index c979320e8..09c6ffa34 100644 --- a/templates/distribution/manifests/logging/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/minio.yaml.tpl @@ -10,6 +10,7 
@@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Ingress @@ -44,6 +45,7 @@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Egress @@ -62,6 +64,7 @@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Egress @@ -88,6 +91,7 @@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Ingress @@ -113,6 +117,7 @@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Egress @@ -131,6 +136,7 @@ metadata: namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - Ingress diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl index be0c1ea31..5089cd95b 100644 --- a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -5,6 +5,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Ingress @@ -39,6 +40,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Egress @@ -57,6 +59,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Egress @@ -82,6 +85,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Ingress @@ 
-107,6 +111,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Ingress @@ -153,6 +158,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Egress diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl index 3756a9f4f..cc046bd3e 100644 --- a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -211,6 +211,7 @@ metadata: namespace: tracing labels: cluster.kfd.sighup.io/module: tracing + cluster.kfd.sighup.io/tracing-backend: minio spec: policyTypes: - Egress From f171b0cb72d2adf7c20296e3e38109a2c00cb00d Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Fri, 15 Nov 2024 09:13:33 +0100 Subject: [PATCH 040/160] feat(network-policies): add policies deletion during type migration --- templates/distribution/scripts/pre-apply.sh.tpl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/templates/distribution/scripts/pre-apply.sh.tpl b/templates/distribution/scripts/pre-apply.sh.tpl index 2c59a7331..bc803b95d 100644 --- a/templates/distribution/scripts/pre-apply.sh.tpl +++ b/templates/distribution/scripts/pre-apply.sh.tpl @@ -27,7 +27,7 @@ vendorPath="{{ .paths.vendorPath }}" {{- if eq .reducers.distributionCommonNetworkPoliciesEnabled.to false }} $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/module - echo "Network Policies deleted" + echo "KFD Network Policies deleted" {{- end }} {{- end }} @@ -56,6 +56,7 @@ deleteOpensearch() { $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch.yaml $kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=opensearch pvc -n logging --wait --timeout=180s + 
$kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-type=opensearch echo "OpenSearch resources deleted" } @@ -72,6 +73,7 @@ deleteLoki() { $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-loki.yaml $kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=loki-distributed pvc -n logging --wait --timeout=180s + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-type=loki echo "Loki resources deleted" } @@ -96,6 +98,7 @@ $kustomizebin build $vendorPath/modules/logging/katalog/minio-ha > delete-loggin fi {{- end }} $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-logging-minio-ha.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s networkpolicies -A -l cluster.kfd.sighup.io/logging-backend=minio echo "Minio Logging deleted" } @@ -180,7 +183,7 @@ deleteGatekeeper() { $kustomizebin build $vendorPath/modules/opa/katalog/gatekeeper/monitoring | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f - {{- end }} $kustomizebin build $vendorPath/modules/opa/katalog/gatekeeper/core | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f - - + $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/policy-type=gatekeeper echo "Gatekeeper resources deleted" } @@ -188,6 +191,7 @@ deleteKyverno() { $kustomizebin build $vendorPath/modules/opa/katalog/kyverno | $kubectlbin delete --ignore-not-found --wait --timeout=180s -f - $kubectlbin delete --ignore-not-found --wait --timeout=180s validatingwebhookconfiguration -l webhook.kyverno.io/managed-by=kyverno $kubectlbin delete --ignore-not-found --wait --timeout=180s mutatingwebhookconfiguration -l webhook.kyverno.io/managed-by=kyverno + $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/policy-type=kyverno echo "Kyverno resources deleted" } 
@@ -310,6 +314,7 @@ deleteTracingMinioHA() { fi {{- end }} $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-tracing-minio-ha.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s -A networkpolicy -l cluster.kfd.sighup.io/tracing-backend=minio echo "Minio HA on tracing namespace deleted" } From 47fb24580037ea5d3dc3d5aca0b6e8500e4cda78 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Fri, 15 Nov 2024 09:34:21 +0100 Subject: [PATCH 041/160] chore: bump kubectl version --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index d3d92ff4b..721fac4e7 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -35,7 +35,7 @@ tools: furyagent: version: 0.4.0 kubectl: - version: 1.30.5 + version: 1.30.6 kustomize: version: 3.10.0 terraform: From bfe1557491d0b6fe999d24f614765ceb497e7946 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Fri, 15 Nov 2024 10:18:13 +0100 Subject: [PATCH 042/160] chore(network-policies): remove from eks and kfd --- defaults/ekscluster-kfd-v1alpha2.yaml | 1 - defaults/kfddistribution-kfd-v1alpha2.yaml | 1 - docs/schemas/ekscluster-kfd-v1alpha2.md | 21 +++++++------------ docs/schemas/kfddistribution-kfd-v1alpha2.md | 21 +++++++------------ .../ekscluster/v1alpha2/private/schema.go | 3 --- pkg/apis/ekscluster/v1alpha2/public/schema.go | 3 --- .../kfddistribution/v1alpha2/public/schema.go | 3 --- schemas/private/ekscluster-kfd-v1alpha2.json | 4 ---- schemas/public/ekscluster-kfd-v1alpha2.json | 4 ---- .../public/kfddistribution-kfd-v1alpha2.json | 4 ---- 10 files changed, 14 insertions(+), 51 deletions(-) diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 3cfe3419d..93350d218 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -15,7 +15,6 @@ data: relativeVendorPath: "../../vendor" provider: type: eks - networkPoliciesEnabled: false # the module section will be used to fine tune each module 
behaviour and configuration modules: # ingress module configuration diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index 7e2f6531d..85c8443b1 100644 --- a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -15,7 +15,6 @@ data: relativeVendorPath: "../../vendor" provider: type: none - networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 1d147f794..a27ebdf41 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -84,20 +84,13 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ### Properties -| Property | Type | Required | -|:------------------------------------------------------------------------|:----------|:---------| -| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional | -| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | -| [provider](#specdistributioncommonprovider) | `object` | Optional | -| [registry](#specdistributioncommonregistry) | `string` | Optional | -| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | -| [tolerations](#specdistributioncommontolerations) | `array` | Optional | - -## .spec.distribution.common.networkPoliciesEnabled - -### Description - -This field defines whether Network Policies are provided for all modules +| Property | Type | Required | +|:----------------------------------------------------------------|:---------|:---------| +| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | +| [provider](#specdistributioncommonprovider) | `object` | Optional | +| [registry](#specdistributioncommonregistry) | `string` | Optional | +| 
[relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | +| [tolerations](#specdistributioncommontolerations) | `array` | Optional | ## .spec.distribution.common.nodeSelector diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index a26f82393..8b8240208 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -76,20 +76,13 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Properties -| Property | Type | Required | -|:------------------------------------------------------------------------|:----------|:---------| -| [networkPoliciesEnabled](#specdistributioncommonnetworkpoliciesenabled) | `boolean` | Optional | -| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | -| [provider](#specdistributioncommonprovider) | `object` | Optional | -| [registry](#specdistributioncommonregistry) | `string` | Optional | -| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | -| [tolerations](#specdistributioncommontolerations) | `array` | Optional | - -## .spec.distribution.common.networkPoliciesEnabled - -### Description - -This field defines whether Network Policies are provided for all modules +| Property | Type | Required | +|:----------------------------------------------------------------|:---------|:---------| +| [nodeSelector](#specdistributioncommonnodeselector) | `object` | Optional | +| [provider](#specdistributioncommonprovider) | `object` | Optional | +| [registry](#specdistributioncommonregistry) | `string` | Optional | +| [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | +| [tolerations](#specdistributioncommontolerations) | `array` | Optional | ## .spec.distribution.common.nodeSelector diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go 
index 8387312c4..4ea507871 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -89,9 +89,6 @@ type SpecDistributionCommon struct { // The tolerations that will be added to the pods for all the KFD modules Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` - - // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". - NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index b5f254855..ff34c16a3 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -89,9 +89,6 @@ type SpecDistributionCommon struct { // The tolerations that will be added to the pods for all the KFD modules Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` - - // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". 
- NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 96d1445b9..9a4f9ca9e 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -75,9 +75,6 @@ type SpecDistributionCommon struct { // The tolerations that will be added to the pods for all the KFD modules Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` - - // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". - NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 11dbd1c43..2aa905308 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -209,10 +209,6 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." 
- }, - "networkPoliciesEnabled": { - "type": "boolean", - "description": "This field defines whether Network Policies are provided for all modules" } } }, diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index e393abebc..49e3379dc 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1026,10 +1026,6 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." - }, - "networkPoliciesEnabled": { - "type": "boolean", - "description": "This field defines whether Network Policies are provided for all modules" } } }, diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 1358f70d8..3e4451b36 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -156,10 +156,6 @@ "registry": { "type": "string", "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." 
- }, - "networkPoliciesEnabled": { - "type": "boolean", - "description": "This field defines whether Network Policies are provided for all modules" } } }, From 7aa446e7ea6c5fed9ca66cd809f2a1b45e1ec595 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Fri, 15 Nov 2024 10:23:09 +0100 Subject: [PATCH 043/160] feat(network-policies): improve description --- docs/schemas/onpremises-kfd-v1alpha2.md | 2 +- schemas/public/onpremises-kfd-v1alpha2.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index c379b5147..e711ff1fb 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -97,7 +97,7 @@ Common configuration for all the distribution modules. ### Description -This field defines whether Network Policies are provided for all modules +EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided for core modules. ## .spec.distribution.common.nodeSelector diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index d5c217bb4..a76f541d0 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -730,7 +730,7 @@ }, "networkPoliciesEnabled": { "type": "boolean", - "description": "This field defines whether Network Policies are provided for all modules" + "description": "EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided for core modules." 
} } }, From 232026ed301a4524ee1f4db56bcf4c429e88dd3d Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Fri, 15 Nov 2024 14:37:41 +0100 Subject: [PATCH 044/160] fix(network-policies): move grafana network policy --- .../monitoring/policies/grafana.yaml.tpl | 37 +++++++++----- .../monitoring/policies/ingress.yaml.tpl | 48 ------------------- 2 files changed, 24 insertions(+), 61 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl index 5510e600d..cc7728597 100644 --- a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl @@ -66,31 +66,42 @@ metadata: labels: cluster.kfd.sighup.io/module: monitoring spec: - policyTypes: - - Ingress podSelector: matchLabels: - app.kubernetes.io/name: grafana app.kubernetes.io/component: grafana + app.kubernetes.io/name: grafana + app.kubernetes.io/part-of: kube-prometheus + policyTypes: + - Ingress ingress: +# single nginx, no sso +{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} - from: - namespaceSelector: -{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} matchLabels: - kubernetes.io/metadata.name: pomerium -{{ else }} + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: matchLabels: kubernetes.io/metadata.name: ingress-nginx -{{- end }} podSelector: matchLabels: -{{- if (eq .spec.distribution.modules.auth.provider.type "sso") }} - app: pomerium -{{- else if eq .spec.distribution.modules.ingress.nginx.type "dual" }} app: ingress -{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} 
- app: ingress-nginx -{{- end }} +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} ports: - port: 3000 protocol: TCP diff --git a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl index c1d6ad1ac..759609694 100644 --- a/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/ingress.yaml.tpl @@ -2,54 +2,6 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: grafana-ingress-nginx - namespace: monitoring - labels: - cluster.kfd.sighup.io/module: monitoring -spec: - podSelector: - matchLabels: - app.kubernetes.io/component: grafana - app.kubernetes.io/name: grafana - app.kubernetes.io/part-of: kube-prometheus - policyTypes: - - Ingress - ingress: -# single nginx, no sso -{{if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} - - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: ingress-nginx - podSelector: - matchLabels: - app: ingress-nginx -# dual nginx, no sso -{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} - - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: ingress-nginx - podSelector: - matchLabels: - app: ingress -# sso -{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} - - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: pomerium - podSelector: - matchLabels: - app: pomerium -{{ end }} - ports: - - port: 3000 - protocol: TCP --- apiVersion: 
networking.k8s.io/v1 kind: NetworkPolicy From 68aa6ad6b05f0545da6a34d28d4c6072d3984cf2 Mon Sep 17 00:00:00 2001 From: Manuel Romei Date: Fri, 15 Nov 2024 17:15:37 +0100 Subject: [PATCH 045/160] feat: add kapp entry in tools --- pkg/apis/config/model.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/apis/config/model.go b/pkg/apis/config/model.go index 52a55f81c..d48d1e1b8 100644 --- a/pkg/apis/config/model.go +++ b/pkg/apis/config/model.go @@ -69,6 +69,7 @@ type KFDToolsCommon struct { Kustomize KFDTool `yaml:"kustomize" validate:"required"` Terraform KFDTool `yaml:"terraform" validate:"required"` Yq KFDTool `yaml:"yq" validate:"required"` + Kapp KFDTool `yaml:"kapp"` Helm KFDTool `yaml:"helm"` Helmfile KFDTool `yaml:"helmfile"` } From e71cdc9abf268a57a73475cb0c2f667070567b23 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Fri, 15 Nov 2024 17:16:52 +0100 Subject: [PATCH 046/160] fix(pre-apply): fix yaml generation for migrations --- .../distribution/scripts/pre-apply.sh.tpl | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/templates/distribution/scripts/pre-apply.sh.tpl b/templates/distribution/scripts/pre-apply.sh.tpl index 2ab5185ec..409576e56 100644 --- a/templates/distribution/scripts/pre-apply.sh.tpl +++ b/templates/distribution/scripts/pre-apply.sh.tpl @@ -29,8 +29,8 @@ deleteOpensearch() { $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n logging opensearch-dashboards $kubectlbin delete --ignore-not-found --wait --timeout=180s ingress -n pomerium opensearch-dashboards - $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-dashboards > delete-opensearch.yaml - $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-triple >> delete-opensearch.yaml + $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-dashboards > delete-opensearch-dashboards.yaml + $kustomizebin build $vendorPath/modules/logging/katalog/opensearch-triple > delete-opensearch.yaml {{- if 
eq .spec.distribution.modules.monitoring.type "none" }} if ! $kubectlbin get apiservice v1.monitoring.coreos.com; then @@ -41,6 +41,7 @@ deleteOpensearch() { $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch.yaml $kubectlbin delete --ignore-not-found -l app.kubernetes.io/name=opensearch pvc -n logging --wait --timeout=180s + $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-opensearch-dashboards.yaml echo "OpenSearch resources deleted" } @@ -558,8 +559,8 @@ deleteNginx() { $kustomizebin build $vendorPath/modules/ingress/katalog/nginx > delete-nginx.yaml $kustomizebin build $vendorPath/modules/ingress/katalog/dual-nginx > delete-dual-nginx.yaml - $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/public > delete-external-dns.yaml - $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/private >> delete-external-dns.yaml + $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/public > delete-external-dns-public.yaml + $kustomizebin build $vendorPath/modules/ingress/katalog/external-dns/private > delete-external-dns-private.yaml $kustomizebin build $vendorPath/modules/ingress/katalog/forecastle > delete-forecastle.yaml {{- if eq .spec.distribution.modules.monitoring.type "none" }} @@ -568,13 +569,16 @@ deleteNginx() { cp delete-nginx-filtered.yaml delete-nginx.yaml cat delete-dual-nginx.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-dual-nginx-filtered.yaml cp delete-dual-nginx-filtered.yaml delete-dual-nginx.yaml - cat delete-external-dns.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-filtered.yaml - cp delete-external-dns-filtered.yaml delete-external-dns.yaml + cat delete-external-dns-public.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-public-filtered.yaml + cp delete-external-dns-public-filtered.yaml delete-external-dns-public.yaml + cat 
delete-external-dns-private.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-external-dns-private-filtered.yaml + cp delete-external-dns-private-filtered.yaml delete-external-dns-private.yaml cat delete-forecastle.yaml | $yqbin 'select(.apiVersion != "monitoring.coreos.com/v1")' > delete-forecastle-filtered.yaml cp delete-forecastle-filtered.yaml delete-forecastle.yaml fi {{- end }} - $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns-public.yaml + $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-external-dns-private.yaml $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-forecastle.yaml $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-dual-nginx.yaml $kubectlbin delete --ignore-not-found --wait --timeout=180s -f delete-nginx.yaml From 45a49b325453835e79f951e17b556c9596768e6f Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 17:40:08 +0100 Subject: [PATCH 047/160] feat: WIP v3.0.0-rc.1 velero testing, changing templates, schema and templates to include snapshot-controller deployment, and enabling volume snapshotting on OnPremises and KFDDistribution providers. 
Complete refactor of velero schedules settings, simplifying and improving the definition --- defaults/ekscluster-kfd-v1alpha2.yaml | 10 ++++++++++ defaults/kfddistribution-kfd-v1alpha2.yaml | 10 ++++++++++ defaults/onpremises-kfd-v1alpha2.yaml | 10 ++++++++++ kfd.yaml | 2 +- .../manifests/dr/kustomization.yaml.tpl | 8 ++++---- .../manifests/dr/patches/infra-nodes.yml.tpl | 17 +++++++++++++++++ .../dr/patches/velero-schedule-full.yml.tpl | 12 ++++-------- .../patches/velero-schedule-manifests.yml.tpl | 10 ++-------- .../resources/volumeSnapshotLocation.yaml.tpl | 16 ++++++++++++++++ 9 files changed, 74 insertions(+), 21 deletions(-) create mode 100644 templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 6c708be00..12bf6492d 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -253,6 +253,16 @@ data: bucketName: velerobucket schedules: install: true + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + snapshotController: + install: false # auth module configuration auth: overrides: diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index d0c790257..39ab2fbb5 100644 --- a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -240,6 +240,16 @@ data: bucketName: velerobucket schedules: install: true + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + snapshotController: + install: false # auth module configuration auth: overrides: diff --git a/defaults/onpremises-kfd-v1alpha2.yaml b/defaults/onpremises-kfd-v1alpha2.yaml index f26ad1e6e..755885e26 100644 --- a/defaults/onpremises-kfd-v1alpha2.yaml +++ 
b/defaults/onpremises-kfd-v1alpha2.yaml @@ -240,6 +240,16 @@ data: bucketName: velerobucket schedules: install: true + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + snapshotController: + install: false # auth module configuration auth: overrides: diff --git a/kfd.yaml b/kfd.yaml index 48cceb0db..517792086 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -6,7 +6,7 @@ version: v1.30.0 modules: auth: v0.3.0 aws: v4.2.1 - dr: v2.3.0 + dr: v3.0.0-rc.1 ingress: v2.3.3 logging: v3.4.1 monitoring: v3.2.0 diff --git a/templates/distribution/manifests/dr/kustomization.yaml.tpl b/templates/distribution/manifests/dr/kustomization.yaml.tpl index f2cd569a7..5368284d4 100644 --- a/templates/distribution/manifests/dr/kustomization.yaml.tpl +++ b/templates/distribution/manifests/dr/kustomization.yaml.tpl @@ -16,6 +16,7 @@ resources: {{- else }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-aws" }} - resources/storageLocation.yaml + - resources/volumeSnapshotLocation.yaml {{- end }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-node-agent" }} @@ -23,6 +24,9 @@ resources: {{- if .spec.distribution.modules.dr.velero.schedules.install }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-schedules" }} {{- end }} +{{- if .spec.distribution.modules.dr.velero.snapshotController.install }} + - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/snapshot-controller" }} +{{- end }} {{- if eq .spec.distribution.common.provider.type "eks" }} - resources/eks-velero-backupstoragelocation.yml - resources/eks-velero-volumesnapshotlocation.yml @@ -34,13 +38,9 @@ patchesStrategicMerge: - patches/eks-velero.yml {{- end }} {{- if .spec.distribution.modules.dr.velero.schedules.install }} -{{- if or (index 
.spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "manifests")) }} - patches/velero-schedule-manifests.yml -{{- end }} -{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "full")) }} - patches/velero-schedule-full.yml {{- end }} -{{- end }} {{- if eq .spec.distribution.common.provider.type "none" }} {{- if eq .spec.distribution.modules.dr.velero.backend "externalEndpoint" }} diff --git a/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl b/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl index 271394159..834150ac0 100644 --- a/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl +++ b/templates/distribution/manifests/dr/patches/infra-nodes.yml.tpl @@ -61,4 +61,21 @@ spec: {{ template "tolerations" $veleroArgs }} {{- end }} + +{{- if .spec.distribution.modules.dr.velero.snapshotController.install }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: snapshot-controller + namespace: kube-system +spec: + template: + spec: + nodeSelector: + {{ template "nodeSelector" $veleroArgs }} + tolerations: + {{ template "tolerations" $veleroArgs }} +{{- end }} + {{- end }} \ No newline at end of file diff --git a/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl b/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl index 5bf3de289..b572e61ba 100644 --- a/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl +++ b/templates/distribution/manifests/dr/patches/velero-schedule-full.yml.tpl @@ -2,7 +2,6 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. 
-{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "full")) }} --- apiVersion: velero.io/v1 kind: Schedule @@ -10,11 +9,8 @@ metadata: name: full namespace: kube-system spec: - {{- if and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "full") }} - schedule: {{ .spec.distribution.modules.dr.velero.schedules.cron.full }} - {{- end }} - {{- if index .spec.distribution.modules.dr.velero.schedules "ttl" }} + schedule: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule }}" template: - ttl: {{ .spec.distribution.modules.dr.velero.schedules.ttl }} - {{- end }} -{{- end }} + ttl: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl }}" + snapshotMoveData: {{ .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData }} + diff --git a/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl b/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl index c08006331..3441eb7fc 100644 --- a/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl +++ b/templates/distribution/manifests/dr/patches/velero-schedule-manifests.yml.tpl @@ -2,7 +2,6 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. 
-{{- if or (index .spec.distribution.modules.dr.velero.schedules "ttl") (and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "manifests")) }} --- apiVersion: velero.io/v1 kind: Schedule @@ -10,11 +9,6 @@ metadata: name: manifests namespace: kube-system spec: - {{- if and (index .spec.distribution.modules.dr.velero.schedules "cron") (index .spec.distribution.modules.dr.velero.schedules.cron "manifests") }} - schedule: {{ .spec.distribution.modules.dr.velero.schedules.cron.manifests }} - {{- end }} - {{- if index .spec.distribution.modules.dr.velero.schedules "ttl" }} + schedule: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule }}" template: - ttl: {{ .spec.distribution.modules.dr.velero.schedules.ttl }} - {{- end }} -{{- end }} + ttl: "{{ .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl }}" diff --git a/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl b/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl new file mode 100644 index 000000000..5679203f9 --- /dev/null +++ b/templates/distribution/manifests/dr/resources/volumeSnapshotLocation.yaml.tpl @@ -0,0 +1,16 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- +apiVersion: velero.io/v1 +kind: VolumeSnapshotLocation +metadata: + name: default + namespace: kube-system + labels: + k8s-app: velero +spec: + config: + region: custom + provider: aws \ No newline at end of file From 3ed10e245d7466f4f922633b4c5b91e95e54093f Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 17:41:15 +0100 Subject: [PATCH 048/160] feat: schema change to reflect new schedule definition, with added snapshotMoveData field. 
Adding also snapshotController installation flag --- schemas/public/onpremises-kfd-v1alpha2.json | 53 +++++++++++++++++---- 1 file changed, 43 insertions(+), 10 deletions(-) diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index cc808f71e..098cbb22b 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1847,24 +1847,57 @@ "type": "boolean", "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." }, - "cron": { + "definitions": { "type": "object", "additionalProperties": false, - "description": "Configuration for Velero's schedules cron.", + "description": "Configuration for Velero schedules.", "properties": { "manifests": { - "type": "string", - "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } }, "full": { - "type": "string", - "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." 
+ }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + } + } } } - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } + }, + "snapshotController": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the additional snapshotController component installation.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in." } } }, From 2cefaebb2b82e063a4f0429a174729aed939d74c Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 17:41:27 +0100 Subject: [PATCH 049/160] docs: add WIP release note with informations on DR changes --- docs/releases/v1.30.0.md | 103 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 docs/releases/v1.30.0.md diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md new file mode 100644 index 000000000..5049a4726 --- /dev/null +++ b/docs/releases/v1.30.0.md @@ -0,0 +1,103 @@ +# Kubernetes Fury Distribution Release v1.30.0 + +Welcome to KFD release `v1.30.0`. + +The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.io/) it is battle tested in production environments. 
+ +## New Features since `v1.29.4` + +### Installer Updates + +- [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) ๐Ÿ“ฆ installer: [**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6) + - TBD +- [eks](https://github.com/sighupio/fury-eks-installer) ๐Ÿ“ฆ installer: [**v3.X.X**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.X.X) + - TBD + +### Module updates + +- [networking](https://github.com/sighupio/fury-kubernetes-networking) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/vX.X.X) + - TBD +- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/vX.X.X) + - TBD +- [logging](https://github.com/sighupio/fury-kubernetes-logging) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/vX.X.X) + - TBD +- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/vX.X.X) + - TBD +- [auth](https://github.com/sighupio/fury-kubernetes-auth) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/vX.X.X) + - TBD +- [dr](https://github.com/sighupio/fury-kubernetes-dr) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/vX.X.X) + - TBD +- [tracing](https://github.com/sighupio/fury-kubernetes-tracing) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/vX.X.X) + - TBD +- [aws](https://github.com/sighupio/fury-kubernetes-aws) ๐Ÿ“ฆ module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/vX.X.X) + - TBD + +## New features ๐ŸŒŸ + +- **DR improved configurable schedules**: The schedule configuration has been updated to enhance the usability of schedule 
customization (note: **this is a breaking change**): + ```yaml + ... + dr: + velero: + schedules: + install: true + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false + ... + ``` +- **DR snapshotMoveData options for full schedule**: A new parameter has been introduced in the velero `full` schedule to enable the snapshotMoveData feature. This feature allows data captured from a snapshot to be copied to the object storage location. Important: Enabling this parameter will cause Velero to upload all data from the snapshotted volumes to S3 using Kopia. While backups are deduplicated, significant storage usage is still expected. To enable this parameter in the full schedule: + ```yaml + ... + dr: + velero: + schedules: + install: true + definitions: + full: + snapshotMoveData: false + ... + ``` +General example to enable Volume Snapshotting on rook-ceph (from our storage add-on module): + ```yaml + apiVersion: snapshot.storage.k8s.io/v1 + kind: VolumeSnapshotClass + metadata: + name: velero-snapclass + labels: + velero.io/csi-volumesnapshot-class: "true" + driver: rook-ceph.rbd.csi.ceph.com + parameters: + clusterID: rook-ceph + csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph + deletionPolicy: Retain + ``` +`deletionPolicy: Retain` is important because if the volume snapshot is deleted from the namespace, the cluster wide volumesnapshotcontent CR will be preserved, maintaining the snapshot on the storage that the cluster is using. +- **DR optional snapshot-controller installation**: To leverage VolumeSnapshots on the OnPremises and KFDDistribution providers, a new option on velero has been added to install the snapshot-controller component. Before activating this parameter make sure that in your cluster there is not another snapshot-controller component deployed. 
By default this parameter is `false`. + ```yaml + ... + dr: + velero: + snapshotController: + install: true + ... + ``` + +## Fixes ๐Ÿž + +- **TBD**: TBD. + + +## Breaking changes ๐Ÿ’” + +- **DR Schema change**: A new format for the schedule customization has been introduced to improve the usability. See New Features section for more informations. + +## Upgrade procedure + +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. From d9bc4c96a9789f886a49059ff87fa1b5a8e09149 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 17:47:07 +0100 Subject: [PATCH 050/160] feat: align eks and kfddistro with new schedule settings --- schemas/public/ekscluster-kfd-v1alpha2.json | 69 +++++++++---------- .../public/kfddistribution-kfd-v1alpha2.json | 53 +++++++++++--- 2 files changed, 76 insertions(+), 46 deletions(-) diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 00008a0da..6ead3d807 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -2166,56 +2166,53 @@ "type": "boolean", "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." }, - "cron": { + "definitions": { "type": "object", "additionalProperties": false, - "description": "Configuration for Velero's schedules cron.", + "description": "Configuration for Velero schedules.", "properties": { "manifests": { - "type": "string", - "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." 
+ }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } }, "full": { - "type": "string", - "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + } + } } } - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." 
} } }, - "eks": { - "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks" - }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "eks" - ] - }, - "Spec.Distribution.Modules.Dr.Velero.Eks": { - "type": "object", - "additionalProperties": false, - "properties": { - "region": { - "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" - }, - "bucketName": { - "$ref": "#/$defs/Types.AwsS3BucketName", - "maxLength": 49, - "description": "The name of the velero bucket" - } - }, - "required": [ - "region", - "bucketName" - ] + } }, "Spec.Distribution.Modules.Auth": { "type": "object", diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 3e4451b36..b42ddecf8 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1268,24 +1268,57 @@ "type": "boolean", "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." }, - "cron": { + "definitions": { "type": "object", "additionalProperties": false, - "description": "Configuration for Velero's schedules cron.", + "description": "Configuration for Velero schedules.", "properties": { "manifests": { - "type": "string", - "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." 
+ } + } }, "full": { - "type": "string", - "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + } + } } } - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } + }, + "snapshotController": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the additional snapshotController component installation.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in." 
} } }, From 8d1fcea31d3356241e601e7fe945ad16dd487563 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 17:47:31 +0100 Subject: [PATCH 051/160] feat: moving snapshotcontroller installation under provider type none, since EKS will install it's own with the aws module --- templates/distribution/manifests/dr/kustomization.yaml.tpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/distribution/manifests/dr/kustomization.yaml.tpl b/templates/distribution/manifests/dr/kustomization.yaml.tpl index 5368284d4..f3fdea029 100644 --- a/templates/distribution/manifests/dr/kustomization.yaml.tpl +++ b/templates/distribution/manifests/dr/kustomization.yaml.tpl @@ -19,14 +19,14 @@ resources: - resources/volumeSnapshotLocation.yaml {{- end }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-node-agent" }} +{{- if .spec.distribution.modules.dr.velero.snapshotController.install }} + - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/snapshot-controller" }} +{{- end }} {{- end }} {{- if .spec.distribution.modules.dr.velero.schedules.install }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/velero-schedules" }} {{- end }} -{{- if .spec.distribution.modules.dr.velero.snapshotController.install }} - - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/dr/katalog/velero/snapshot-controller" }} -{{- end }} {{- if eq .spec.distribution.common.provider.type "eks" }} - resources/eks-velero-backupstoragelocation.yml - resources/eks-velero-volumesnapshotlocation.yml From b2781b298b1f7cbe48096ecde89dd7228bece6c0 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 17:58:43 +0100 Subject: [PATCH 052/160] fix: restore delete schema piece --- schemas/public/ekscluster-kfd-v1alpha2.json | 27 ++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git 
a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 6ead3d807..a51b5e084 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -2209,10 +2209,35 @@ } } }, + "eks": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks" + }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - } + }, + "required": [ + "eks" + ] + }, + "Spec.Distribution.Modules.Dr.Velero.Eks": { + "type": "object", + "additionalProperties": false, + "properties": { + "region": { + "$ref": "#/$defs/Types.AwsRegion", + "description": "The region where the velero bucket is located" + }, + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "maxLength": 49, + "description": "The name of the velero bucket" + } + }, + "required": [ + "region", + "bucketName" + ] }, "Spec.Distribution.Modules.Auth": { "type": "object", From 8220d820d0af308d2c93c8155f3179beacab22f8 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 18:06:05 +0100 Subject: [PATCH 053/160] docs: regenerate docs --- docs/schemas/ekscluster-kfd-v1alpha2.md | 297 +++++++++++-------- docs/schemas/kfddistribution-kfd-v1alpha2.md | 286 +++++++++++------- docs/schemas/onpremises-kfd-v1alpha2.md | 288 +++++++++++------- 3 files changed, 522 insertions(+), 349 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 387244539..aaa1e16d4 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -33,7 +33,7 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -145,7 +145,7 @@ The tolerations that will be added to the pods for all the KFD modules ### Constraints 
-**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -163,7 +163,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -210,7 +210,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -418,7 +418,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -609,7 +609,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -627,7 +627,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -677,7 +677,7 @@ The tolerations that will be added to the pods for the auth module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -695,7 +695,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal 
to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -791,7 +791,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -805,7 +805,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -915,7 +915,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -990,7 +990,7 @@ The tolerations that will be added to the pods for the cluster autoscaler module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1008,7 +1008,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1076,7 +1076,7 @@ The tolerations that will be added to the pods for the cluster autoscaler module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1094,7 +1094,7 @@ The key of the toleration ### Constraints 
-**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1149,7 +1149,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1167,7 +1167,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1235,7 +1235,7 @@ The tolerations that will be added to the pods for the cluster autoscaler module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1253,7 +1253,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1303,7 +1303,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1321,7 +1321,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1381,7 +1381,7 @@ The tolerations that will 
be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1399,7 +1399,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1420,7 +1420,7 @@ The type of the DR, must be ***none*** or ***eks*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------| @@ -1460,7 +1460,7 @@ The region where the velero bucket is located ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -1528,7 +1528,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1546,7 +1546,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1563,53 +1563,91 @@ The value of the toleration ### Properties -| Property | Type | Required | -|:------------------------------------------------------------|:----------|:---------| -| [cron](#specdistributionmodulesdrveleroschedulescron) | `object` | Optional | -| 
[install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | -| [ttl](#specdistributionmodulesdrveleroschedulesttl) | `string` | Optional | +| Property | Type | Required | +|:--------------------------------------------------------------------|:----------|:---------| +| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional | +| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | ### Description Configuration for Velero's backup schedules. -## .spec.distribution.modules.dr.velero.schedules.cron +## .spec.distribution.modules.dr.velero.schedules.definitions ### Properties -| Property | Type | Required | -|:--------------------------------------------------------------------|:---------|:---------| -| [full](#specdistributionmodulesdrveleroschedulescronfull) | `string` | Optional | -| [manifests](#specdistributionmodulesdrveleroschedulescronmanifests) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------|:---------|:---------| +| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional | +| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional | + +### Description + +Configuration for Velero schedules. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full + +### Properties + +| Property | Type | Required | +|:---------------------------------------------------------------------------------------------|:----------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional | +| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional | ### Description -Configuration for Velero's schedules cron. 
+Configuration for Velero's manifests backup schedule. -## .spec.distribution.modules.dr.velero.schedules.cron.full +## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule ### Description The cron expression for the `full` backup schedule (default `0 1 * * *`). -## .spec.distribution.modules.dr.velero.schedules.cron.manifests +## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData ### Description -The cron expression for the `manifests` backup schedule (default `*/15 * * * *`). +SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. -## .spec.distribution.modules.dr.velero.schedules.install +## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl ### Description -Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`. +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests -## .spec.distribution.modules.dr.velero.schedules.ttl +### Properties + +| Property | Type | Required | +|:----------------------------------------------------------------------------------|:---------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional | + +### Description + +Configuration for Velero's manifests backup schedule. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule + +### Description + +The cron expression for the `manifests` backup schedule (default `*/15 * * * *`). 
+ +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl ### Description The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. +## .spec.distribution.modules.dr.velero.schedules.install + +### Description + +Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`. + ## .spec.distribution.modules.ingress ### Properties @@ -1675,7 +1713,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1716,7 +1754,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1734,7 +1772,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1791,7 +1829,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1809,7 +1847,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | 
|:---------| @@ -1906,7 +1944,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1924,7 +1962,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1985,7 +2023,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2003,7 +2041,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2033,7 +2071,7 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -2069,7 +2107,7 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2148,7 +2186,7 @@ The tolerations that will be added to the pods for the ingress module ### Constraints -**enum**: the value of this property must be equal to one of the 
following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2166,7 +2204,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2236,7 +2274,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2254,7 +2292,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2348,7 +2386,7 @@ This value defines where the output from Flow will be sent. 
Will be the `spec` s ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2492,7 +2530,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2510,7 +2548,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2595,7 +2633,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2613,7 +2651,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2691,7 +2729,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2740,7 +2778,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: 
| Value | |:-------------------| @@ -2758,7 +2796,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2808,7 +2846,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2826,7 +2864,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2847,7 +2885,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2948,7 +2986,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2966,7 +3004,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3031,7 +3069,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the 
value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3049,7 +3087,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3116,7 +3154,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3134,7 +3172,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3166,7 +3204,7 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3249,7 +3287,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3267,7 +3305,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3330,7 +3368,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of 
this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3348,7 +3386,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3425,7 +3463,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3443,7 +3481,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3626,7 +3664,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3677,7 +3715,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3695,7 +3733,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3751,7 +3789,7 @@ The tolerations that will be added to the 
pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3769,7 +3807,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3824,7 +3862,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3842,7 +3880,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3891,7 +3929,7 @@ The enforcement action to use for the gatekeeper module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3939,7 +3977,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3957,7 +3995,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4027,7 +4065,7 @@ 
The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4045,7 +4083,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4066,7 +4104,7 @@ The validation failure action to use for the kyverno module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -4110,7 +4148,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4128,7 +4166,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4149,7 +4187,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4212,7 +4250,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must 
be equal to one of the following string values: | Value | |:-------------------| @@ -4230,7 +4268,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4307,7 +4345,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4325,7 +4363,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4357,7 +4395,7 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4440,7 +4478,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4458,7 +4496,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4485,7 +4523,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Constraints -**enum**: the value of this property must be equal to one of the 
following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4919,7 +4957,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------------| @@ -5032,7 +5070,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5111,7 +5149,7 @@ The type of the FW rule can be ingress or egress ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5190,7 +5228,7 @@ The type of the FW rule can be ingress or egress ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5242,7 +5280,7 @@ The container runtime to use for the nodes ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -5285,7 +5323,7 @@ The size of the disk in GB ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------| @@ -5365,7 +5403,7 @@ AWS tags that will be added to the ASG and EC2 instances ### Constraints 
-**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------------| @@ -5380,7 +5418,7 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------------| @@ -5474,14 +5512,15 @@ Overrides the default IAM role name prefix for the EKS workers ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5489,6 +5528,12 @@ Overrides the default IAM role name prefix for the EKS workers The chart of the release +## 
.spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. + ## .spec.plugins.helm.releases.name ### Description @@ -5580,7 +5625,7 @@ The name of the kustomize plugin ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -5687,7 +5732,7 @@ This value defines in which region the bucket is located ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 6118a1540..1f9bbf63e 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -29,7 +29,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -137,7 +137,7 @@ The tolerations that will be added to the pods for all the KFD modules ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -155,7 +155,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -202,7 +202,7 @@ The behavior of the 
configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -410,7 +410,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -606,7 +606,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -624,7 +624,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -674,7 +674,7 @@ The tolerations that will be added to the pods for the auth module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -692,7 +692,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -788,7 +788,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -802,7 +802,7 @@ override default routes for KFD components ### 
Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -912,7 +912,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -967,7 +967,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -985,7 +985,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1006,7 +1006,7 @@ The type of the DR, must be ***none*** or ***on-premises*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1017,12 +1017,13 @@ The type of the DR, must be ***none*** or ***on-premises*** ### Properties -| Property | Type | Required | -|:---------------------------------------------------------------------|:---------|:---------| -| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional | -| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional | -| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | -| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | +| Property | Type | Required | 
+|:-------------------------------------------------------------------------|:---------|:---------| +| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional | +| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional | +| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | +| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | +| [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional | ## .spec.distribution.modules.dr.velero.backend @@ -1032,7 +1033,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1119,7 +1120,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1137,7 +1138,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1154,52 +1155,108 @@ The value of the toleration ### Properties -| Property | Type | Required | -|:------------------------------------------------------------|:----------|:---------| -| [cron](#specdistributionmodulesdrveleroschedulescron) | `object` | Optional | -| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | -| [ttl](#specdistributionmodulesdrveleroschedulesttl) | `string` | Optional | +| Property | Type | Required | 
+|:--------------------------------------------------------------------|:----------|:---------| +| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional | +| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | ### Description Configuration for Velero's backup schedules. -## .spec.distribution.modules.dr.velero.schedules.cron +## .spec.distribution.modules.dr.velero.schedules.definitions ### Properties -| Property | Type | Required | -|:--------------------------------------------------------------------|:---------|:---------| -| [full](#specdistributionmodulesdrveleroschedulescronfull) | `string` | Optional | -| [manifests](#specdistributionmodulesdrveleroschedulescronmanifests) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------|:---------|:---------| +| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional | +| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional | ### Description -Configuration for Velero's schedules cron. +Configuration for Velero schedules. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full + +### Properties -## .spec.distribution.modules.dr.velero.schedules.cron.full +| Property | Type | Required | +|:---------------------------------------------------------------------------------------------|:----------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional | +| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional | + +### Description + +Configuration for Velero's full backup schedule. 
+ +## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule ### Description The cron expression for the `full` backup schedule (default `0 1 * * *`). -## .spec.distribution.modules.dr.velero.schedules.cron.manifests +## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData + +### Description + +SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl + +### Description + +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests + +### Properties + +| Property | Type | Required | +|:----------------------------------------------------------------------------------|:---------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional | + +### Description + +Configuration for Velero's manifests backup schedule. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule ### Description The cron expression for the `manifests` backup schedule (default `*/15 * * * *`). +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl + +### Description + +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. + ## .spec.distribution.modules.dr.velero.schedules.install ### Description Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`. 
-## .spec.distribution.modules.dr.velero.schedules.ttl +## .spec.distribution.modules.dr.velero.snapshotController + +### Properties + +| Property | Type | Required | +|:---------------------------------------------------------------------|:----------|:---------| +| [install](#specdistributionmodulesdrvelerosnapshotcontrollerinstall) | `boolean` | Optional | ### Description -The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. +Configuration for the additional snapshotController component installation. + +## .spec.distribution.modules.dr.velero.snapshotController.install + +### Description + +Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in. ## .spec.distribution.modules.ingress @@ -1265,7 +1322,7 @@ The type of the cluster issuer, must be ***http01*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1305,7 +1362,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1323,7 +1380,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1378,7 +1435,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be 
equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1396,7 +1453,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1457,7 +1514,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1475,7 +1532,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1505,7 +1562,7 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1541,7 +1598,7 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1620,7 +1677,7 @@ The tolerations that will be added to the pods for the ingress module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1638,7 +1695,7 @@ The key of the 
toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1708,7 +1765,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1726,7 +1783,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1820,7 +1877,7 @@ This value defines where the output from Flow will be sent. Will be the `spec` s ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1964,7 +2021,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1982,7 +2039,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2067,7 +2124,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string 
values: | Value | |:-------------------| @@ -2085,7 +2142,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2163,7 +2220,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2212,7 +2269,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2230,7 +2287,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2280,7 +2337,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2298,7 +2355,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2319,7 +2376,7 @@ selects the logging stack. Choosing none will disable the centralized logging. 
C ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2420,7 +2477,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2438,7 +2495,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2503,7 +2560,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2521,7 +2578,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2588,7 +2645,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2606,7 +2663,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2638,7 +2695,7 @@ The 
backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2721,7 +2778,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2739,7 +2796,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2802,7 +2859,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2820,7 +2877,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2897,7 +2954,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2915,7 +2972,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the 
following string values: | Value | |:---------| @@ -3098,7 +3155,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3149,7 +3206,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3167,7 +3224,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3237,7 +3294,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3255,7 +3312,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3317,7 +3374,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3335,7 +3392,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following 
values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3390,7 +3447,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3408,7 +3465,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3429,7 +3486,7 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3473,7 +3530,7 @@ The enforcement action to use for the gatekeeper module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3521,7 +3578,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3539,7 +3596,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3609,7 +3666,7 @@ The tolerations that will be added to the pods for the cert-manager module ### 
Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3627,7 +3684,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3648,7 +3705,7 @@ The validation failure action to use for the kyverno module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -3692,7 +3749,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3710,7 +3767,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3731,7 +3788,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -3794,7 +3851,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | 
|:-------------------| @@ -3812,7 +3869,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3889,7 +3946,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3907,7 +3964,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3939,7 +3996,7 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4022,7 +4079,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4040,7 +4097,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4067,7 +4124,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must 
be equal to one of the following string values: | Value | |:--------| @@ -4102,14 +4159,15 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -4117,6 +4175,12 @@ The type of tracing to use, either ***none*** or ***tempo*** The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
+ ## .spec.plugins.helm.releases.name ### Description diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index f620b0661..05e604d21 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -29,7 +29,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -149,7 +149,7 @@ An array with the tolerations that will be added to the pods for all the KFD mod ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -167,7 +167,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -214,7 +214,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -422,7 +422,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -632,7 +632,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -650,7 +650,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -827,7 +827,7 @@ Set to override the tolerations that will be added to the pods of the Auth modul ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -845,7 +845,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -941,7 +941,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -955,7 +955,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1072,7 +1072,7 @@ The type of the Auth provider, options are: ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -1135,7 +1135,7 @@ Set to override the tolerations that will be added to the 
pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1153,7 +1153,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1174,7 +1174,7 @@ The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disab ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1185,12 +1185,13 @@ The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disab ### Properties -| Property | Type | Required | -|:---------------------------------------------------------------------|:---------|:---------| -| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional | -| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional | -| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | -| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | +| Property | Type | Required | +|:-------------------------------------------------------------------------|:---------|:---------| +| [backend](#specdistributionmodulesdrvelerobackend) | `string` | Optional | +| [externalEndpoint](#specdistributionmodulesdrveleroexternalendpoint) | `object` | Optional | +| [overrides](#specdistributionmodulesdrvelerooverrides) | `object` | Optional | +| [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | +| [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional | ### Description 
@@ -1204,7 +1205,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1291,7 +1292,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1309,7 +1310,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1326,52 +1327,108 @@ The value of the toleration ### Properties -| Property | Type | Required | -|:------------------------------------------------------------|:----------|:---------| -| [cron](#specdistributionmodulesdrveleroschedulescron) | `object` | Optional | -| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | -| [ttl](#specdistributionmodulesdrveleroschedulesttl) | `string` | Optional | +| Property | Type | Required | +|:--------------------------------------------------------------------|:----------|:---------| +| [definitions](#specdistributionmodulesdrveleroschedulesdefinitions) | `object` | Optional | +| [install](#specdistributionmodulesdrveleroschedulesinstall) | `boolean` | Optional | ### Description Configuration for Velero's backup schedules. 
-## .spec.distribution.modules.dr.velero.schedules.cron +## .spec.distribution.modules.dr.velero.schedules.definitions ### Properties -| Property | Type | Required | -|:--------------------------------------------------------------------|:---------|:---------| -| [full](#specdistributionmodulesdrveleroschedulescronfull) | `string` | Optional | -| [manifests](#specdistributionmodulesdrveleroschedulescronmanifests) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------|:---------|:---------| +| [full](#specdistributionmodulesdrveleroschedulesdefinitionsfull) | `object` | Optional | +| [manifests](#specdistributionmodulesdrveleroschedulesdefinitionsmanifests) | `object` | Optional | ### Description -Configuration for Velero's schedules cron. +Configuration for Velero schedules. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full + +### Properties -## .spec.distribution.modules.dr.velero.schedules.cron.full +| Property | Type | Required | +|:---------------------------------------------------------------------------------------------|:----------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsfullschedule) | `string` | Optional | +| [snapshotMoveData](#specdistributionmodulesdrveleroschedulesdefinitionsfullsnapshotmovedata) | `boolean` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsfullttl) | `string` | Optional | + +### Description + +Configuration for Velero's full backup schedule. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full.schedule ### Description The cron expression for the `full` backup schedule (default `0 1 * * *`). -## .spec.distribution.modules.dr.velero.schedules.cron.manifests +## .spec.distribution.modules.dr.velero.schedules.definitions.full.snapshotMoveData + +### Description + +SnapshotMoveData specifies whether snapshot data should be moved.
Velero will create a new volume from the snapshot and upload the content to the storageLocation. + +## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl + +### Description + +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests + +### Properties + +| Property | Type | Required | +|:----------------------------------------------------------------------------------|:---------|:---------| +| [schedule](#specdistributionmodulesdrveleroschedulesdefinitionsmanifestsschedule) | `string` | Optional | +| [ttl](#specdistributionmodulesdrveleroschedulesdefinitionsmanifeststtl) | `string` | Optional | + +### Description + +Configuration for Velero's manifests backup schedule. + +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.schedule ### Description The cron expression for the `manifests` backup schedule (default `*/15 * * * *`). +## .spec.distribution.modules.dr.velero.schedules.definitions.manifests.ttl + +### Description + +The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. + ## .spec.distribution.modules.dr.velero.schedules.install ### Description Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`. 
-## .spec.distribution.modules.dr.velero.schedules.ttl +## .spec.distribution.modules.dr.velero.snapshotController + +### Properties + +| Property | Type | Required | +|:---------------------------------------------------------------------|:----------|:---------| +| [install](#specdistributionmodulesdrvelerosnapshotcontrollerinstall) | `boolean` | Optional | ### Description -The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL. +Configuration for the additional snapshotController component installation. + +## .spec.distribution.modules.dr.velero.snapshotController.install + +### Description + +Whether to install or not the snapshotController component in the cluster. Before enabling this field, check if your CSI driver does not have snapshotController built-in. ## .spec.distribution.modules.ingress @@ -1446,7 +1503,7 @@ The type of the clusterIssuer. Only `http01` challenge is supported for on-premi ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1486,7 +1543,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1504,7 +1561,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1559,7 +1616,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1577,7 +1634,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1638,7 +1695,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1656,7 +1713,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1686,7 +1743,7 @@ The provider of the TLS certificates for the ingresses, one of: `none`, `certMan ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1737,7 +1794,7 @@ The type of the nginx ingress controller, options are: ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1820,7 +1877,7 @@ Set to override the tolerations that will be added to the pods of the Ingress mo ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | 
|:-------------------| @@ -1838,7 +1895,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1918,7 +1975,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1936,7 +1993,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2038,7 +2095,7 @@ The storage backend type for Loki. `minio` will use an in-cluster MinIO deployme ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2190,7 +2247,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2208,7 +2265,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2293,7 +2350,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2311,7 +2368,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2389,7 +2446,7 @@ The type of OpenSearch deployment. One of: `single` for a single replica or `tri ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2442,7 +2499,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2460,7 +2517,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2514,7 +2571,7 @@ Set to override the tolerations that will be added to the pods of the module. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2532,7 +2589,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2557,7 +2614,7 @@ Selects the logging stack. Options are: ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2658,7 +2715,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2676,7 +2733,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2741,7 +2798,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2759,7 +2816,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2826,7 +2883,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2844,7 +2901,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2880,7 +2937,7 @@ The storage backend type for Mimir. `minio` will use an in-cluster MinIO deploym ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2967,7 +3024,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2985,7 +3042,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3052,7 +3109,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3070,7 +3127,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3151,7 +3208,7 @@ Set to override the tolerations that will be added to the pods of the module. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3169,7 +3226,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3352,7 +3409,7 @@ The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or ` ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3403,7 +3460,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3421,7 +3478,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3499,7 +3556,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3517,7 +3574,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3587,7 +3644,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3605,7 +3662,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3660,7 +3717,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3678,7 +3735,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3699,7 +3756,7 @@ The type of CNI plugin to use, either `calico` (default, via the Tigera Operator ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3750,7 +3807,7 @@ The default enforcement action to use for the included constraints. `deny` will ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3798,7 +3855,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3816,7 +3873,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3890,7 +3947,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3908,7 +3965,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3929,7 +3986,7 @@ The validation failure action to use for the policies, `Enforce` will block when ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -3977,7 +4034,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3995,7 +4052,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4016,7 +4073,7 @@ The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4087,7 +4144,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4105,7 +4162,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4186,7 +4243,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4204,7 +4261,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4240,7 +4297,7 @@ The storage backend type for Tempo. `minio` will use an in-cluster MinIO deploym ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4327,7 +4384,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4345,7 +4402,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4372,7 +4429,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -5004,7 +5061,7 @@ Name for the node group. It will be also used as the node role label. It should ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -5150,14 +5207,15 @@ The subnet CIDR to use for the Services network. 
### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5165,6 +5223,12 @@ The subnet CIDR to use for the Services network. The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
+ ## .spec.plugins.helm.releases.name ### Description From 969754c9df2187c26655d5fad29fc5147c4a3adb Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 18:06:22 +0100 Subject: [PATCH 054/160] feat: regenerate private eks cluster schema --- schemas/private/ekscluster-kfd-v1alpha2.json | 42 +++++++++++++++----- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index ea44d253c..862be216a 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -593,24 +593,46 @@ "type": "boolean", "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." }, - "cron": { + "definitions": { "type": "object", "additionalProperties": false, - "description": "Configuration for Velero's schedules cron.", + "description": "Configuration for Velero schedules.", "properties": { "manifests": { - "type": "string", - "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } }, "full": { - "type": "string", - "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." 
+ "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + } + } } } - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." } } }, From 64dc6d0966cbad9519d7e047bc0873567d4b1fbe Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sat, 16 Nov 2024 18:06:36 +0100 Subject: [PATCH 055/160] feat: regenerate go schema files --- .../ekscluster/v1alpha2/private/schema.go | 42 +- pkg/apis/ekscluster/v1alpha2/public/schema.go | 2311 +++++++++-------- .../kfddistribution/v1alpha2/public/schema.go | 1290 ++++----- pkg/apis/onpremises/v1alpha2/public/schema.go | 1917 +++++++------- 4 files changed, 2847 insertions(+), 2713 deletions(-) diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 08b1c1a25..c83abc9e9 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -586,12 +586,32 @@ type SpecDistributionModulesDrVeleroEks struct { // Configuration for Velero's backup schedules. 
type SpecDistributionModulesDrVeleroSchedules struct { - // Configuration for Velero's schedules cron. - Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"` + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` // Whether to install or not the default `manifests` and `full` backups schedules. // Default is `true`. Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // SnapshotMoveData specifies whether snapshot data should be moved. Velero will + // create a new volume from the snapshot and upload the content to the + // storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default // `720h0m0s`, 30 days). 
Notice that changing this value will affect only newly @@ -599,14 +619,16 @@ type SpecDistributionModulesDrVeleroSchedules struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -// Configuration for Velero's schedules cron. -type SpecDistributionModulesDrVeleroSchedulesCron struct { - // The cron expression for the `full` backup schedule (default `0 1 * * *`). - Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { // The cron expression for the `manifests` backup schedule (default `*/15 * * * // *`). - Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } type SpecDistributionModulesIngress struct { @@ -1756,6 +1778,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. 
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 76fada170..b0ca936b2 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -574,12 +574,32 @@ type SpecDistributionModulesDrVeleroEks struct { // Configuration for Velero's backup schedules. type SpecDistributionModulesDrVeleroSchedules struct { - // Configuration for Velero's schedules cron. - Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"` + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` // Whether to install or not the default `manifests` and `full` backups schedules. // Default is `true`. Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. 
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // SnapshotMoveData specifies whether snapshot data should be moved. Velero will + // create a new volume from the snapshot and upload the content to the + // storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default // `720h0m0s`, 30 days). Notice that changing this value will affect only newly @@ -587,14 +607,16 @@ type SpecDistributionModulesDrVeleroSchedules struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -// Configuration for Velero's schedules cron. -type SpecDistributionModulesDrVeleroSchedulesCron struct { - // The cron expression for the `full` backup schedule (default `0 1 * * *`). - Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { // The cron expression for the `manifests` backup schedule (default `*/15 * * * // *`). - Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. 
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } type SpecDistributionModulesIngress struct { @@ -1709,6 +1731,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` @@ -1811,712 +1837,457 @@ const ( TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" ) -type TypesAwsS3BucketName string - -type TypesAwsS3BucketNamePrefix string - -type TypesAwsS3KeyPrefix string - -type TypesAwsSshPubKey string - -type TypesAwsSubnetId string - -type TypesAwsTags map[string]string - -type TypesAwsVpcId string - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the 
cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + } + *j = SpecKubernetesNodePoolInstanceVolumeType(v) + return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". 
- Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", } -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesIpAddress string - -type TypesKubeLabels map[string]string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeNodeSelector map[string]string - -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", } -type TypesKubeTaints []string - -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", } -type TypesKubeTolerationEffect string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" -) - -type TypesKubeTolerationEffect_1 string - -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -type TypesKubeTolerationOperator_1 string - -const ( - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesTcpPort int - -type TypesUri string - -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} - -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - 
"none", - "gatekeeper", - "kyverno", -} +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionCommonProvider + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCommonProvider(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModules(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecDistributionModulesMonitoringType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionModulesAuth + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecDistribution(plain) return nil } +type TypesCidr string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) - } - *j = SpecKubernetesLogsTypesElem(v) - return nil -} - -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) - } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) - } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) - return nil +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecInfrastructureVpc(plain) return nil } +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecKubernetesAPIServer + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAPIServer(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} +type TypesAwsVpcId string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesAwsRegion { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) - } - *j = TypesAwsRegion(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") - } - type Plain SpecKubernetesNodePoolInstance - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecDistributionModulesLoggingType(v) return nil } @@ -2541,507 +2312,619 @@ func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { return nil } +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecInfrastructureVpnSsh + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetesAPIServer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpc - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - type Plain SpecInfrastructureVpcNetwork + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +type TypesKubeResourcesLimits struct { + // The cpu limit for the opensearch pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + } + *j = SpecKubernetesLogsTypesElem(v) + return nil +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistribution + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistribution(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + *j = SpecDistributionModulesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - type Plain SpecDistributionModules + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) return nil } +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecKubernetesNodePool + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } +type TypesAwsTags map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecDistributionModulesTracing + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", "none", - "tempo", -} - -// UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) - } - *j = SpecDistributionModulesTracingTempoBackend(v) - return nil -} - -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecKubernetes + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetes(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } // 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) - } - *j = SpecDistributionModulesPolicyType(v) - return nil +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", 
+ "egress", } // UnmarshalJSON implements json.Unmarshaler. @@ -3065,950 +2948,1118 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") - } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecKubernetesNodePoolSize + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecDistributionModulesDr(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecToolsConfigurationTerraformState + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) - } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecDistributionModulesDrVelero(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecToolsConfigurationTerraform + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraform(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } +const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecToolsConfiguration + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + return nil +} + +const TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["id"]; !ok || v == nil { + return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + } + if v, ok := raw["owner"]; !ok || v == nil { + return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + } + type Plain SpecKubernetesNodePoolAmi var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfiguration(plain) + *j = SpecKubernetesNodePoolAmi(plain) return nil } +const TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } +const ( + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" +) + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = 
[]interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain Spec + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } +const ( + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecDistributionModulesDr(plain) + *j = TypesAwsRegion(v) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain TypesKubeToleration + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } +type TypesKubeLabels_1 map[string]string + +type TypesAwsS3BucketName string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + } + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } +type TypesAwsSubnetId string + +type TypesKubeTaints []string + // UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesDrType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") - } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesMonitoring(plain) - return nil +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = TypesKubeTolerationOperator(v) + *j = SpecKubernetesNodePoolType(v) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the dr module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + return fmt.Errorf("field name in SpecKubernetesNodePool: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecKubernetesNodePool(plain) return nil } +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = TypesKubeTolerationEffect(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in 
SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecKubernetes(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) - return nil -} - -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) - } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") } - *j = SpecDistributionModulesMonitoringType(v) + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecDistributionModulesAuthDex + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", -} +type TypesAwsS3KeyPrefix string // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + } + type Plain SpecToolsConfigurationTerraformStateS3 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + type Plain SpecToolsConfigurationTerraformState + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecToolsConfigurationTerraformState(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain TypesKubeToleration_1 + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecToolsConfigurationTerraform(plain) return nil } +type TypesKubeLabels map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecToolsConfiguration(plain) return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + return fmt.Errorf("field kubernetes in Spec: required") } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type 
Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = TypesKubeToleration(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` } +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = TypesKubeTolerationOperator(v) return nil } +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return 
fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + 
break + } } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = TypesKubeTolerationEffect_1(v) return nil } +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = TypesKubeTolerationOperator_1(v) return nil } +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". 
+ Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - type Plain SpecDistributionModulesLoggingOpensearch + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = TypesKubeToleration_1(plain) return nil } +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". 
+ Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } +type TypesAwsSshPubKey string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesUri string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecDistributionCommonProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionCommonProvider(plain) return nil } +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { var v string @@ -4029,23 +4080,7 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil -} +type TypesKubeNodeSelector map[string]string // UnmarshalJSON implements json.Unmarshaler. 
func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 9a4f9ca9e..a82d0d12f 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -511,6 +511,9 @@ type SpecDistributionModulesDrVelero struct { // Configuration for Velero's backup schedules. Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` + + // Configuration for the additional snapshotController component installation. + SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"` } type SpecDistributionModulesDrVeleroBackend string @@ -540,12 +543,32 @@ type SpecDistributionModulesDrVeleroExternalEndpoint struct { // Configuration for Velero's backup schedules. type SpecDistributionModulesDrVeleroSchedules struct { - // Configuration for Velero's schedules cron. - Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"` + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` // Whether to install or not the default `manifests` and `full` backups schedules. // Default is `true`. Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. 
+ Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // SnapshotMoveData specifies whether snapshot data should be moved. Velero will + // create a new volume from the snapshot and upload the content to the + // storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default // `720h0m0s`, 30 days). Notice that changing this value will affect only newly @@ -553,14 +576,24 @@ type SpecDistributionModulesDrVeleroSchedules struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -// Configuration for Velero's schedules cron. -type SpecDistributionModulesDrVeleroSchedulesCron struct { - // The cron expression for the `full` backup schedule (default `0 1 * * *`). - Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { // The cron expression for the `manifests` backup schedule (default `*/15 * * * // *`). 
- Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for the additional snapshotController component installation. +type SpecDistributionModulesDrVeleroSnapshotController struct { + // Whether to install or not the snapshotController component in the cluster. + // Before enabling this field, check if your CSI driver does not have + // snapshotController built-in. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` } type SpecDistributionModulesIngress struct { @@ -1222,222 +1255,343 @@ type SpecDistributionModulesTracingTempo struct { type SpecDistributionModulesTracingTempoBackend string -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend 
will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". 
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". 
- NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +const SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the security module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") - } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecDistributionModules + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". 
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The cpu limit for the loki pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ "minio", "externalEndpoint", } // 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - *j = SpecDistributionModulesLogging(plain) - return nil -} + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + 
return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil +} + +type TypesCidr string + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { @@ -1480,11 +1634,25 @@ func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil } var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ @@ -1514,56 +1682,67 @@ func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecDistributionModulesLoggingOpensearch + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecDistributionModulesDr(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesDrVeleroBackend(v) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". 
- Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -1582,12 +1761,9 @@ func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", } var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ @@ -1616,78 +1792,44 @@ func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesDrType(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") - } - type Plain SpecDistributionModulesLoggingCustomOutputs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) - return nil +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the security module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -1709,25 +1851,15 @@ func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") - } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngress(plain) - return nil +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass 
*string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ @@ -1756,49 +1888,61 @@ func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJ } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -1820,22 +1964,10 @@ func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") - } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLS(plain) - return nil +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } var enumValues_SpecDistributionModulesPolicyType = []interface{}{ @@ -1865,73 +1997,73 @@ func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != 
nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -1951,48 +2083,46 @@ func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", } var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ @@ -2020,73 +2150,49 @@ func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) err return nil } +const SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesDr + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) - } - *j = SpecDistributionModulesDrVeleroBackend(v) + *j = SpecDistributionModulesLogging(plain) return nil } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", -} +const TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", -} +type TypesUri string var enumValues_SpecDistributionModulesTracingType = []interface{}{ "none", @@ -2113,35 +2219,15 @@ func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { return nil } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } // UnmarshalJSON implements json.Unmarshaler. @@ -2163,215 +2249,178 @@ func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") - } - type Plain SpecDistributionModulesAuthProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProvider(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) - } - *j = SpecDistributionModulesMonitoringType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["kubeconfig"]; !ok || v == nil { - return fmt.Errorf("field kubeconfig in SpecDistribution: required") - } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") - } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistribution(plain) - return nil -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") - } - type Plain SpecPluginsHelmReleasesElemSetElem - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecPluginsHelmReleasesElemSetElem(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModules(plain) return nil } +type TypesKubeLabels map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["kubeconfig"]; !ok || v == nil { + return fmt.Errorf("field kubeconfig in SpecDistribution: required") } - type Plain SpecDistributionModulesAuthDex + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistribution(plain) return nil } +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". 
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2396,46 +2445,6 @@ func (j *Spec) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) - return nil -} - -type TypesKubeLabels map[string]string - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil -} - var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ "create", "replace", @@ -2502,15 +2511,39 @@ func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { return nil } -const TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - -type TypesKubeTolerationOperator string - var enumValues_TypesKubeTolerationOperator = []interface{}{ "Exists", "Equal", } +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule 
TypesKubeTolerationEffect = "NoSchedule" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2569,11 +2602,27 @@ func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { } const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" ) +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key id of the external tempo backend + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external tempo backend + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the external tempo backend + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the external tempo backend will not use tls + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the external tempo backend + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" 
mapstructure:"secretAccessKey,omitempty"` +} + type TypesKubeTolerationOperator_1 string var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ @@ -2644,31 +2693,22 @@ func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { return nil } -const ( - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2687,13 +2727,9 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} +type TypesEnvRef string -type TypesKubeTolerationEffect string +type TypesFileRef string type TypesIpAddress string @@ -2707,7 +2743,7 @@ type TypesSshPubKey string type TypesTcpPort int -type TypesUri string +type SpecDistributionModulesTracingType string // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 854a24a16..fcf84c0d1 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -597,6 +597,9 @@ type SpecDistributionModulesDrVelero struct { // Configuration for Velero's backup schedules. Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` + + // Configuration for the additional snapshotController component installation. + SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"` } type SpecDistributionModulesDrVeleroBackend string @@ -626,12 +629,32 @@ type SpecDistributionModulesDrVeleroExternalEndpoint struct { // Configuration for Velero's backup schedules. type SpecDistributionModulesDrVeleroSchedules struct { - // Configuration for Velero's schedules cron. - Cron *SpecDistributionModulesDrVeleroSchedulesCron `json:"cron,omitempty" yaml:"cron,omitempty" mapstructure:"cron,omitempty"` + // Configuration for Velero schedules. 
+ Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` // Whether to install or not the default `manifests` and `full` backups schedules. // Default is `true`. Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // SnapshotMoveData specifies whether snapshot data should be moved. Velero will + // create a new volume from the snapshot and upload the content to the + // storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default // `720h0m0s`, 30 days). Notice that changing this value will affect only newly @@ -639,14 +662,24 @@ type SpecDistributionModulesDrVeleroSchedules struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -// Configuration for Velero's schedules cron. 
-type SpecDistributionModulesDrVeleroSchedulesCron struct { - // The cron expression for the `full` backup schedule (default `0 1 * * *`). - Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { // The cron expression for the `manifests` backup schedule (default `*/15 * * * // *`). - Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for the additional snapshotController component installation. +type SpecDistributionModulesDrVeleroSnapshotController struct { + // Whether to install or not the snapshotController component in the cluster. + // Before enabling this field, check if your CSI driver does not have + // snapshotController built-in. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` } type SpecDistributionModulesIngress struct { @@ -1139,583 +1172,426 @@ type SpecDistributionModulesMonitoringMinio struct { StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the default MinIO root user. - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} - // The username for the default MinIO root user. - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil } -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := 
raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} - // The retention size for the `k8s` Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} - // The retention time for the `k8s` Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} - // The storage size for the `k8s` Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil } -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. 
- // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil } -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} +type TypesKubeResourcesLimits struct { + // The cpu limit for the loki pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` -type SpecDistributionModulesMonitoringType string + // The memory limit for the prometheus pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} -const ( - 
SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) +type TypesKubeResourcesRequests struct { + // The cpu request for the loki pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // The memory request for the prometheus pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -// Configuration for the Networking module. -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of CNI plugin to use, either `calico` (default, via the Tigera - // Operator) or `cilium`. 
- Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesNetworkingCilium struct { - // The mask size to use for the Pods network on each node. - MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Allows specifing a CIDR for the Pods network different from - // `.spec.kubernetes.podCidr`. If not set the default is to use - // `.spec.kubernetes.podCidr`. - PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" -) - -// Configuration for the Policy module. -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of policy enforcement to use, either `none`, `gatekeeper` or - // `kyverno`. - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -// Configuration for the Gatekeeper package. -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The default enforcement action to use for the included constraints. `deny` will - // block the admission when violations to the policies are found, `warn` will show - // a message to the user but will admit the violating requests and `dryrun` won't - // give any feedback to the user but it will log the violations. - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // Set to `false` to avoid installing the default Gatekeeper policies (constraints - // templates and constraints) included with the distribution. - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -// Configuration for the Kyverno package. -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the policies on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // Set to `false` to avoid installing the default Kyverno policies included with - // distribution. - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the policies, `Enforce` will block - // when a request does not comply with the policies and `Audit` will not block but - // log when a request does not comply with the policies. 
- ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -// Configuration for the Tracing module. -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either `none` or `tempo`. `none` will disable the - // Tracing module and `tempo` will install a Grafana Tempo deployment. - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -// Configuration for Tracing's MinIO deployment. -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The PVC size for each MinIO disk, 6 disks total. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the default MinIO root user. - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the default MinIO root user. - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -// Configuration for the Tempo package. -type SpecDistributionModulesTracingTempo struct { - // The storage backend type for Tempo. `minio` will use an in-cluster MinIO - // deployment for object storage, `externalEndpoint` can be used to point to an - // external S3-compatible object storage instead of deploying an in-cluster MinIO. - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // Configuration for Tempo's external storage backend. - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the traces stored in Tempo. 
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -// Configuration for Tempo's external storage backend. -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key ID (username) for the external S3-compatible bucket. - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external S3-compatible object storage. - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // External S3-compatible endpoint for Tempo's storage. - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, will use HTTP as protocol instead of HTTPS. - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key (password) for the external S3-compatible bucket. - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -// Defines the Kubernetes components configuration and the values needed for the -// kubernetes phase of furyctl. -type SpecKubernetes struct { - // Advanced corresponds to the JSON schema field "advanced". 
- Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` - - // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". - AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` - - // The address for the Kubernetes control plane. Usually a DNS entry pointing to a - // Load Balancer on port 6443. - ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` - - // The DNS zone of the machines. It will be appended to the name of each host to - // generate the `kubernetes_hostname` in the Ansible inventory file. It is also - // used to calculate etcd's initial cluster value. - DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` - - // LoadBalancers corresponds to the JSON schema field "loadBalancers". - LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` - - // Masters corresponds to the JSON schema field "masters". - Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` - - // Nodes corresponds to the JSON schema field "nodes". - Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` - - // The path to the folder where the PKI files for Kubernetes and etcd are stored. - PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` - - // The subnet CIDR to use for the Pods network. - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` - - // Proxy corresponds to the JSON schema field "proxy". - Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` - - // Ssh corresponds to the JSON schema field "ssh". 
- Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - - // The subnet CIDR to use for the Services network. - SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` -} - -type SpecKubernetesAdvanced struct { - // AirGap corresponds to the JSON schema field "airGap". - AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` - - // Cloud corresponds to the JSON schema field "cloud". - Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` - - // Containerd corresponds to the JSON schema field "containerd". - Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` - - // Encryption corresponds to the JSON schema field "encryption". - Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"` - - // Oidc corresponds to the JSON schema field "oidc". - Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"` - - // URL of the registry where to pull images from for the Kubernetes phase. - // (Default is registry.sighup.io/fury/on-premises). - Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - - // Users corresponds to the JSON schema field "users". - Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` -} - -// Advanced configuration for air-gapped installations. Allows setting custom URLs -// where to download the binaries dependencies from and custom .deb and .rpm -// package repositories. -type SpecKubernetesAdvancedAirGap struct { - // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should - // be as the one downloaded from containerd GitHub releases page. 
- ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` - - // DependenciesOverride corresponds to the JSON schema field - // "dependenciesOverride". - DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` - - // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded - // from - // `//etcd--linux-.tar.gz` - EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"` - - // Checksum for the runc binary. - RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"` - - // URL where to download the runc binary from. - RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"` -} - -type SpecKubernetesAdvancedAirGapDependenciesOverride struct { - // Apt corresponds to the JSON schema field "apt". - Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` - - // Yum corresponds to the JSON schema field "yum". - Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` -} - -type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { - // URL where to download the GPG key of the Apt repository. Example: - // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` - GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` - - // The GPG key ID of the Apt repository. 
Example: - // `36A1D7869245C8950F966E92D8576A8BA88D21E9` - GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` - - // An indicative name for the Apt repository. Example: `k8s-1.29` - Name string `json:"name" yaml:"name" mapstructure:"name"` +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - // A source string for the new Apt repository. Example: `deb - // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /` - Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` } -type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { - // URL where to download the ASCII-armored GPG key of the Yum repository. Example: - // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` - GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` - - // If true, the GPG signature check on the packages will be enabled. - GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` - - // An indicative name for the Yum repository. Example: `k8s-1.29` - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // URL to the directory where the Yum repository's `repodata` directory lives. - // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/` - Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` - - // If true, the GPG signature check on the `repodata` will be enabled. 
- RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } -type SpecKubernetesAdvancedAnsible struct { - // Additional configuration to append to the ansible.cfg file - Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` - - // The Python interpreter to use for running Ansible. Example: python3 - PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesDr(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["ip"]; !ok || v == nil { - return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesLoadBalancersHost - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) } - *j = SpecKubernetesLoadBalancersHost(plain) + *j = SpecDistributionModulesDrVeleroBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication even if - // `.spec.modules.auth.provider.type` is SSO or Basic Auth. - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // Use this host for the ingress instead of the default one. - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} - // Use this ingress class for the ingress instead of the default one. - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesLoggingType(v) return nil } -type TypesCidr string - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", +} // Override the common configuration with a particular 
configuration for the // module. @@ -1730,619 +1606,758 @@ type TypesFuryModuleOverrides struct { Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "calico", - "cilium", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ "none", - "opensearch", - "loki", - "customOutputs", + "basicAuth", + "sso", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesNetworking + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworking(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesDrVeleroBackend(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required") } - type Plain SpecDistributionModulesLoggingOpensearch + type Plain SpecDistributionModulesAuthOIDCKubernetesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesDr + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". 
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the `k8s` Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the `k8s` Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). 
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) -// UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) - return nil +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +type TypesCidr string + +type SpecDistributionModulesNetworkingCilium struct { + // The mask size to use for the Pods network on each node. + MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Allows specifing a CIDR for the Pods network different from + // `.spec.kubernetes.podCidr`. If not set the default is to use + // `.spec.kubernetes.podCidr`. + PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) - } - *j = SpecDistributionModulesPolicyType(v) - return nil +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", +type SpecDistributionModulesNetworkingType string + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "calico", + "cilium", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionModulesNetworkingType(v) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` +const ( + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` -type TypesKubeResourcesRequests struct { - // The cpu request for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The memory request for the prometheus pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `calico` (default, via the Tigera + // Operator) or `cilium`. + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecDistributionModulesNetworking var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesNetworking(plain) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - // The memory limit for the prometheus pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressNginxTLSSecret - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Gatekeeper package. 
+type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionModulesIngressNginxTLS + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +// Configuration for the Kyverno package. +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. 
+ InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - *j = SpecDistributionModulesLoggingLokiBackend(v) + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) return nil 
} -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecDistributionModulesPolicyType string + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesPolicyType(v) return nil } +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) + +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain 
SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. 
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2364,66 +2379,63 @@ func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +// Configuration for the Tracing module. 
+type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } -type SpecKubernetesLoadBalancersKeepalived struct { - // Set to install keepalived with a floating virtual IP shared between the load - // balancer hosts for a deployment in High Availability. - Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` - - // Name of the network interface where to bind the Keepalived virtual IP. - Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"` - - // The Virtual floating IP for Keepalived - Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"` - - // The passphrase for the Keepalived clustering. - Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"` - - // The virtual router ID of Keepalived, must be different from other Keepalived - // instances in the same network. - VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2471,24 +2483,21 @@ func (j *SpecDistribution) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) - } - *j = SpecDistributionModulesMonitoringMimirBackend(v) - return nil +type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { + // URL where to download the GPG key of the Apt repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // The GPG key ID of the Apt repository. Example: + // `36A1D7869245C8950F966E92D8576A8BA88D21E9` + GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` + + // An indicative name for the Apt repository. Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // A source string for the new Apt repository. Example: `deb + // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -2509,36 +2518,34 @@ func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideApt) UnmarshalJSON(b [] if v, ok := raw["repo"]; !ok || v == nil { return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") } - type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain) return nil } +type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { + // URL where to download the ASCII-armored GPG key of the Yum repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // If true, the GPG signature check on the packages will be enabled. + GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` + + // An indicative name for the Yum repository. 
Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // URL to the directory where the Yum repository's `repodata` directory lives. + // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` + + // If true, the GPG signature check on the `repodata` will be enabled. + RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2569,42 +2576,36 @@ func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b [] return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) - return nil +type SpecKubernetesAdvancedAirGapDependenciesOverride struct { + // Apt corresponds to the JSON schema field "apt". + Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` + + // Yum corresponds to the JSON schema field "yum". + Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil +// Advanced configuration for air-gapped installations. Allows setting custom URLs +// where to download the binaries dependencies from and custom .deb and .rpm +// package repositories. +type SpecKubernetesAdvancedAirGap struct { + // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should + // be as the one downloaded from containerd GitHub releases page. + ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` + + // DependenciesOverride corresponds to the JSON schema field + // "dependenciesOverride". + DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` + + // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded + // from + // `//etcd--linux-.tar.gz` + EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"` + + // Checksum for the runc binary. + RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"` + + // URL where to download the runc binary from. 
+ RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"` } type SpecKubernetesAdvancedCloud struct { @@ -2723,30 +2724,36 @@ type SpecKubernetesAdvancedUsers struct { Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"` } -type TypesFuryModuleComponentOverrides struct { - // Set to override the node selector used to place the pods of the package. - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +type SpecKubernetesAdvanced struct { + // AirGap corresponds to the JSON schema field "airGap". + AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` - // Set to override the tolerations that will be added to the pods of the package. - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // Cloud corresponds to the JSON schema field "cloud". + Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` + + // Containerd corresponds to the JSON schema field "containerd". + Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` + + // Encryption corresponds to the JSON schema field "encryption". + Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"` + + // Oidc corresponds to the JSON schema field "oidc". + Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"` + + // URL of the registry where to pull images from for the Kubernetes phase. + // (Default is registry.sighup.io/fury/on-premises). 
+ Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // Users corresponds to the JSON schema field "users". + Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enabled"]; !ok || v == nil { - return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required") - } - type Plain SpecDistributionModulesAuthOIDCKubernetesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain) - return nil +type SpecKubernetesAdvancedAnsible struct { + // Additional configuration to append to the ansible.cfg file + Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + + // The Python interpreter to use for running Ansible. Example: python3 + PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` } type SpecKubernetesLoadBalancersHost struct { @@ -2760,46 +2767,63 @@ type SpecKubernetesLoadBalancersHost struct { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["ip"]; !ok || v == nil { + return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required") } - type Plain SpecDistributionModulesAuth + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required") + } + type Plain SpecKubernetesLoadBalancersHost var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecKubernetesLoadBalancersHost(plain) return nil } +type SpecKubernetesLoadBalancersKeepalived struct { + // Set to install keepalived with a floating virtual IP shared between the load + // balancer hosts for a deployment in High Availability. + Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` + + // Name of the network interface where to bind the Keepalived virtual IP. + Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"` + + // The Virtual floating IP for Keepalived + Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"` + + // The passphrase for the Keepalived clustering. + Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"` + + // The virtual router ID of Keepalived, must be different from other Keepalived + // instances in the same network. + VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecKubernetesLoadBalancersKeepalived var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecKubernetesLoadBalancersKeepalived(plain) return nil } -type TypesFileRef string - // Configuration for HAProxy stats page. Accessible at http://:1936/stats type SpecKubernetesLoadBalancersStats struct { @@ -3107,22 +3131,47 @@ func (j *SpecKubernetesSSH) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil +// Defines the Kubernetes components configuration and the values needed for the +// kubernetes phase of furyctl. 
+type SpecKubernetes struct { + // Advanced corresponds to the JSON schema field "advanced". + Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` + + // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". + AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` + + // The address for the Kubernetes control plane. Usually a DNS entry pointing to a + // Load Balancer on port 6443. + ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` + + // The DNS zone of the machines. It will be appended to the name of each host to + // generate the `kubernetes_hostname` in the Ansible inventory file. It is also + // used to calculate etcd's initial cluster value. + DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` + + // LoadBalancers corresponds to the JSON schema field "loadBalancers". + LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` + + // Masters corresponds to the JSON schema field "masters". + Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` + + // Nodes corresponds to the JSON schema field "nodes". + Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` + + // The path to the folder where the PKI files for Kubernetes and etcd are stored. + PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` + + // The subnet CIDR to use for the Pods network. + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` + + // Proxy corresponds to the JSON schema field "proxy". + Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". 
+ Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The subnet CIDR to use for the Services network. + SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` } // UnmarshalJSON implements json.Unmarshaler. @@ -3200,6 +3249,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` @@ -3560,23 +3613,7 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { type TypesEnvRef string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enabled"]; !ok || v == nil { - return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required") - } - type Plain SpecKubernetesLoadBalancersKeepalived - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesLoadBalancersKeepalived(plain) - return nil -} +type TypesFileRef string type TypesIpAddress string From 8bc33e6bd24443727afed58955f989e7512c3ff9 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sun, 17 Nov 2024 12:04:31 +0100 Subject: [PATCH 056/160] feat: add experimental mark on snapshotMoveData parameter --- schemas/public/ekscluster-kfd-v1alpha2.json | 2 +- schemas/public/kfddistribution-kfd-v1alpha2.json | 2 +- schemas/public/onpremises-kfd-v1alpha2.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index a51b5e084..7b78086c8 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -2201,7 +2201,7 @@ }, "snapshotMoveData": { "type": "boolean", - "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." } } } diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index b42ddecf8..7cafefcce 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1303,7 +1303,7 @@ }, "snapshotMoveData": { "type": "boolean", - "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." 
} } } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 098cbb22b..96b4c5dfb 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1882,7 +1882,7 @@ }, "snapshotMoveData": { "type": "boolean", - "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." } } } From 1527a0660b088efbc3a6bf8b965ad24ead55d6f1 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Sun, 17 Nov 2024 12:05:09 +0100 Subject: [PATCH 057/160] chores: update go library and docs --- docs/schemas/ekscluster-kfd-v1alpha2.md | 2 +- docs/schemas/kfddistribution-kfd-v1alpha2.md | 2 +- docs/schemas/onpremises-kfd-v1alpha2.md | 2 +- pkg/apis/ekscluster/v1alpha2/private/schema.go | 9 ++++++--- pkg/apis/ekscluster/v1alpha2/public/schema.go | 9 ++++++--- pkg/apis/kfddistribution/v1alpha2/public/schema.go | 9 ++++++--- pkg/apis/onpremises/v1alpha2/public/schema.go | 9 ++++++--- schemas/private/ekscluster-kfd-v1alpha2.json | 2 +- 8 files changed, 28 insertions(+), 16 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index aaa1e16d4..9490f92b2 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -1609,7 +1609,7 @@ The cron expression for the `full` backup schedule (default `0 1 * * *`). 
### Description -SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. +EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. ## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 1f9bbf63e..269f29796 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -1201,7 +1201,7 @@ The cron expression for the `full` backup schedule (default `0 1 * * *`). ### Description -SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. +EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. ## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index 05e604d21..f45c18cd8 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -1373,7 +1373,7 @@ The cron expression for the `full` backup schedule (default `0 1 * * *`). 
### Description -SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. +EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation. ## .spec.distribution.modules.dr.velero.schedules.definitions.full.ttl diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index c83abc9e9..b45b524a9 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -608,9 +608,12 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { // The cron expression for the `full` backup schedule (default `0 1 * * *`). Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` - // SnapshotMoveData specifies whether snapshot data should be moved. Velero will - // create a new volume from the snapshot and upload the content to the - // storageLocation. + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. 
SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index b0ca936b2..b9224116f 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -596,9 +596,12 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { // The cron expression for the `full` backup schedule (default `0 1 * * *`). Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` - // SnapshotMoveData specifies whether snapshot data should be moved. Velero will - // create a new volume from the snapshot and upload the content to the - // storageLocation. + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index a82d0d12f..96038dc8f 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -565,9 +565,12 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { // The cron expression for the `full` backup schedule (default `0 1 * * *`). 
Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` - // SnapshotMoveData specifies whether snapshot data should be moved. Velero will - // create a new volume from the snapshot and upload the content to the - // storageLocation. + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index fcf84c0d1..4fccde58a 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -651,9 +651,12 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { // The cron expression for the `full` backup schedule (default `0 1 * * *`). Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` - // SnapshotMoveData specifies whether snapshot data should be moved. Velero will - // create a new volume from the snapshot and upload the content to the - // storageLocation. + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. 
Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` // The Time To Live (TTL) of the backups created by the backup schedules (default diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 862be216a..3b2b53163 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -628,7 +628,7 @@ }, "snapshotMoveData": { "type": "boolean", - "description": "SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." 
} } } From c790f96be03cd5f2e4400e6400c772daf4c90f4c Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Mon, 18 Nov 2024 09:12:29 +0100 Subject: [PATCH 058/160] feat(schemas)(loki): add required 'enabled' property for tsdbSchemav13Migration in schemas --- schemas/private/ekscluster-kfd-v1alpha2.json | 24 +++++++++++++++++++ schemas/public/ekscluster-kfd-v1alpha2.json | 5 +++- .../public/kfddistribution-kfd-v1alpha2.json | 5 +++- schemas/public/onpremises-kfd-v1alpha2.json | 5 +++- 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 2aa905308..06f30232d 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1144,6 +1144,30 @@ }, "resources": { "$ref": "#/$defs/Types.KubeResources" + }, + "tsdbSchemav13Migration": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean", + "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" + }, + "schemaConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + } + } + } + }, + "required": [ + "enabled" + ] } } }, diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 75eacc6dc..cc5d04288 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1553,7 +1553,10 @@ } } } - } + }, + "required": [ + "enabled" + ] } } }, diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 52f483bc9..b0af22493 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -620,7 +620,10 @@ } } } - } + }, + "required": 
[ + "enabled" + ] } } }, diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index d2c786ad5..b9f55a6cf 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1204,7 +1204,10 @@ } } } - } + }, + "required": [ + "enabled" + ] } } }, From 2781dfe6537d8ddd04f2ebd5b735ea03a0a7d396 Mon Sep 17 00:00:00 2001 From: Manuel Romei Date: Mon, 18 Nov 2024 10:56:01 +0100 Subject: [PATCH 059/160] chore: regenerate Go models --- pkg/apis/ekscluster/v1alpha2/private/schema.go | 4 ++++ pkg/apis/ekscluster/v1alpha2/public/schema.go | 4 ++++ pkg/apis/kfddistribution/v1alpha2/public/schema.go | 4 ++++ pkg/apis/onpremises/v1alpha2/public/schema.go | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 08b1c1a25..5d3a616cb 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -1756,6 +1756,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. 
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 76fada170..8d2d43563 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -1709,6 +1709,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 9a4f9ca9e..9a45ff86a 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -1271,6 +1271,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. 
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 854a24a16..426125b61 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -3200,6 +3200,10 @@ type SpecPluginsHelmReleases []struct { // The chart of the release Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + // The name of the release Name string `json:"name" yaml:"name" mapstructure:"name"` From f12063c569306dbf0c5926787bb2653626679453 Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Mon, 18 Nov 2024 11:02:29 +0100 Subject: [PATCH 060/160] feat(defaults): add default for loki tsdb schema v13 migration --- defaults/ekscluster-kfd-v1alpha2.yaml | 2 ++ defaults/kfddistribution-kfd-v1alpha2.yaml | 2 ++ defaults/onpremises-kfd-v1alpha2.yaml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 6c708be00..6a65fe780 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -113,6 +113,8 @@ data: secretAccessKey: example accessKeyId: example bucketName: lokibucket + tsdbSchemav13Migration: + enabled: false customOutputs: {} # monitoring module configuration monitoring: diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index d0c790257..75ae054d3 100644 --- 
a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -106,6 +106,8 @@ data: secretAccessKey: example accessKeyId: example bucketName: lokibucket + tsdbSchemav13Migration: + enabled: false customOutputs: {} # monitoring module configuration monitoring: diff --git a/defaults/onpremises-kfd-v1alpha2.yaml b/defaults/onpremises-kfd-v1alpha2.yaml index f26ad1e6e..f6b628cd8 100644 --- a/defaults/onpremises-kfd-v1alpha2.yaml +++ b/defaults/onpremises-kfd-v1alpha2.yaml @@ -106,6 +106,8 @@ data: secretAccessKey: example accessKeyId: example bucketName: lokibucket + tsdbSchemav13Migration: + enabled: false customOutputs: {} # monitoring module configuration monitoring: From 89ecbd9657622f7bfd7cf9af7717ebd2d67354ef Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Mon, 18 Nov 2024 11:03:30 +0100 Subject: [PATCH 061/160] feat(templates): add tsdb_shipper configuration in loki-config patch template --- .../manifests/logging/patches/loki-config.yaml.tpl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl index b76f9316d..9dc91d0db 100644 --- a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl +++ b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl @@ -104,6 +104,14 @@ storage_config: cache_ttl: 24h resync_interval: 5s shared_store: s3 +{{- if .spec.distribution.modules.logging.loki.tsdbSchemav13Migration.enabled }} + tsdb_shipper: + active_index_directory: /var/loki/index + cache_location: /var/loki/cache + cache_ttl: 24h + resync_interval: 5s + shared_store: s3 +{{- end }} filesystem: directory: /var/loki/chunks table_manager: From 099bfe9a6b25338cbfe3325b865128a9226ce429 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Mon, 18 Nov 2024 16:32:30 +0100 Subject: [PATCH 062/160] feat: changes to configmap to support ingress v3 --- 
.../ingress/patches/eks-ingress-nginx-external.yml.tpl | 2 +- .../ingress/patches/eks-ingress-nginx-internal.yml.tpl | 2 +- .../manifests/ingress/patches/eks-ingress-nginx.yml.tpl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl index 663d748ea..a18265814 100644 --- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-external.yml.tpl @@ -22,7 +22,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: nginx-configuration-external + name: ingress-nginx-controller-external namespace: ingress-nginx data: use-proxy-protocol: "true" diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl index a7aa6f6ad..6ae0a6b14 100644 --- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx-internal.yml.tpl @@ -21,7 +21,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: nginx-configuration-internal + name: ingress-nginx-controller-internal namespace: ingress-nginx data: use-proxy-protocol: "true" diff --git a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl index 08dc64d82..60b7771ba 100644 --- a/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl +++ b/templates/distribution/manifests/ingress/patches/eks-ingress-nginx.yml.tpl @@ -22,7 +22,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: nginx-configuration + name: ingress-nginx-controller namespace: ingress-nginx data: use-proxy-protocol: "true" From 
80d42e6be9e08f802b60195d31fb414fd5dcc02b Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Mon, 18 Nov 2024 17:13:07 +0100 Subject: [PATCH 063/160] feat: test new logging 3.5.0-rc.1 version and change OnPremises schema on Loki for the required value of the tsdb date --- kfd.yaml | 2 +- schemas/public/onpremises-kfd-v1alpha2.json | 48 +++++++++---------- .../logging/patches/loki-config.yaml.tpl | 6 +-- 3 files changed, 25 insertions(+), 31 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index 48cceb0db..203461a07 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -8,7 +8,7 @@ modules: aws: v4.2.1 dr: v2.3.0 ingress: v2.3.3 - logging: v3.4.1 + logging: v3.5.0-rc.1 monitoring: v3.2.0 opa: v1.12.0 networking: v1.17.0 diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index b9f55a6cf..5ed4b6ada 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1061,6 +1061,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -1182,34 +1196,18 @@ } } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + }, "resources": { "$ref": "#/$defs/Types.KubeResources" - }, - "tsdbSchemav13Migration": { - "type": "object", - "additionalProperties": false, - "properties": { - "enabled": { - "type": "boolean", - "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" - }, - "schemaConfig": { - "type": "object", - "additionalProperties": false, - "properties": { - "tsdbStartDate": { - "type": "string", - "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" - } - } - } - }, - "required": [ - "enabled" - ] } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Operator": { "type": "object", diff --git 
a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl index 9dc91d0db..9d721a828 100644 --- a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl +++ b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl @@ -77,15 +77,13 @@ schema_config: object_store: s3 schema: v11 store: boltdb-shipper -{{- if .spec.distribution.modules.logging.loki.tsdbSchemav13Migration.enabled }} - - from: {{ .spec.distribution.modules.logging.loki.tsdbSchemav13Migration.tsdbStartDate }} + - from: "{{ .spec.distribution.modules.logging.loki.tsdbStartDate }}" index: period: 24h prefix: index_ object_store: s3 schema: v13 store: tsdb -{{- end }} server: http_listen_port: 3100 storage_config: @@ -104,14 +102,12 @@ storage_config: cache_ttl: 24h resync_interval: 5s shared_store: s3 -{{- if .spec.distribution.modules.logging.loki.tsdbSchemav13Migration.enabled }} tsdb_shipper: active_index_directory: /var/loki/index cache_location: /var/loki/cache cache_ttl: 24h resync_interval: 5s shared_store: s3 -{{- end }} filesystem: directory: /var/loki/chunks table_manager: From 099813345d5946ba74d51250c7c240179ca98446 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 18 Nov 2024 17:25:28 +0100 Subject: [PATCH 064/160] feat(monitoring): update to monitoring v3.3.0-rc.1 - Update kfd.yaml to use monitoring v3.3.0-rc1. - Update templates to align the scrapeConfigs to what has been done in the monitoring module. 
--- kfd.yaml | 2 +- .../resources/prometheus-agent/prometheus-agent.yaml.tpl | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index 48cceb0db..3cc038c28 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -9,8 +9,8 @@ modules: dr: v2.3.0 ingress: v2.3.3 logging: v3.4.1 - monitoring: v3.2.0 opa: v1.12.0 + monitoring: v3.3.0-rc.1 networking: v1.17.0 tracing: v1.1.0 kubernetes: diff --git a/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl b/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl index 062a63c94..a616ece68 100644 --- a/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl +++ b/templates/distribution/manifests/monitoring/resources/prometheus-agent/prometheus-agent.yaml.tpl @@ -30,9 +30,7 @@ spec: probeSelector: {} serviceMonitorNamespaceSelector: {} serviceMonitorSelector: {} - scrapeConfigSelector: - matchLabels: - prometheus: k8s + scrapeConfigSelector: {} {{- $prometheusAgentArgs := dict "module" "monitoring" "package" "prometheusAgent" "spec" .spec }} tolerations: From efca435e2fcedd4100d258b68dbcfe65e33599a0 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Mon, 18 Nov 2024 17:37:22 +0100 Subject: [PATCH 065/160] fix: loki-config template generation when using a different logging type than loki --- .../distribution/manifests/logging/patches/loki-config.yaml.tpl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl index 9d721a828..7d9c30240 100644 --- a/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl +++ b/templates/distribution/manifests/logging/patches/loki-config.yaml.tpl @@ -77,6 +77,7 @@ schema_config: object_store: s3 schema: v11 store: boltdb-shipper +{{- if and (index .spec.distribution.modules.logging "loki") (index 
.spec.distribution.modules.logging.loki "tsdbStartDate") }} - from: "{{ .spec.distribution.modules.logging.loki.tsdbStartDate }}" index: period: 24h @@ -84,6 +85,7 @@ schema_config: object_store: s3 schema: v13 store: tsdb +{{- end }} server: http_listen_port: 3100 storage_config: From 68af9b44ed70ddefa1171a642ad2bc105ba4d656 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Mon, 18 Nov 2024 17:37:58 +0100 Subject: [PATCH 066/160] feat: align schemas with onpremises --- schemas/public/ekscluster-kfd-v1alpha2.json | 48 +++++++++---------- .../public/kfddistribution-kfd-v1alpha2.json | 48 +++++++++---------- 2 files changed, 46 insertions(+), 50 deletions(-) diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index a066a538f..00881525e 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1465,6 +1465,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -1581,34 +1595,18 @@ } } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + }, "resources": { "$ref": "#/$defs/Types.KubeResources" - }, - "tsdbSchemav13Migration": { - "type": "object", - "additionalProperties": false, - "properties": { - "enabled": { - "type": "boolean", - "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" - }, - "schemaConfig": { - "type": "object", - "additionalProperties": false, - "properties": { - "tsdbStartDate": { - "type": "string", - "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" - } - } - } - }, - "required": [ - "enabled" - ] } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Operator": { "type": "object", diff --git 
a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index b0af22493..69de3c38a 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -482,6 +482,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -598,34 +612,18 @@ } } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + }, "resources": { "$ref": "#/$defs/Types.KubeResources" - }, - "tsdbSchemav13Migration": { - "type": "object", - "additionalProperties": false, - "properties": { - "enabled": { - "type": "boolean", - "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" - }, - "schemaConfig": { - "type": "object", - "additionalProperties": false, - "properties": { - "tsdbStartDate": { - "type": "string", - "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" - } - } - } - }, - "required": [ - "enabled" - ] } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Operator": { "type": "object", From bd6c98b717bbd5d0f4b20c4c9850b7ec2c1bad96 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Mon, 18 Nov 2024 17:39:43 +0100 Subject: [PATCH 067/160] feat: regenerate schemas and docs --- docs/schemas/ekscluster-kfd-v1alpha2.md | 232 +- docs/schemas/kfddistribution-kfd-v1alpha2.md | 194 +- docs/schemas/onpremises-kfd-v1alpha2.md | 196 +- .../ekscluster/v1alpha2/private/schema.go | 2720 ++++++------ pkg/apis/ekscluster/v1alpha2/public/schema.go | 2327 +++++----- .../kfddistribution/v1alpha2/public/schema.go | 3835 +++++++++-------- pkg/apis/onpremises/v1alpha2/public/schema.go | 1021 ++--- schemas/private/ekscluster-kfd-v1alpha2.json | 48 +- 8 files changed, 5344 insertions(+), 5229 deletions(-) diff --git 
a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 387244539..6cc68ce46 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -33,7 +33,7 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -145,7 +145,7 @@ The tolerations that will be added to the pods for all the KFD modules ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -163,7 +163,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -210,7 +210,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -418,7 +418,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -609,7 +609,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -627,7 +627,7 @@ The key of the toleration ### Constraints 
-**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -677,7 +677,7 @@ The tolerations that will be added to the pods for the auth module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -695,7 +695,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -791,7 +791,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -805,7 +805,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -915,7 +915,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -990,7 +990,7 @@ The tolerations that will be added to the pods for the cluster autoscaler module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1008,7 +1008,7 @@ The key 
of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1076,7 +1076,7 @@ The tolerations that will be added to the pods for the cluster autoscaler module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1094,7 +1094,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1149,7 +1149,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1167,7 +1167,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1235,7 +1235,7 @@ The tolerations that will be added to the pods for the cluster autoscaler module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1253,7 +1253,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ 
-1303,7 +1303,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1321,7 +1321,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1381,7 +1381,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1399,7 +1399,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1420,7 +1420,7 @@ The type of the DR, must be ***none*** or ***eks*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------| @@ -1460,7 +1460,7 @@ The region where the velero bucket is located ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -1528,7 +1528,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the 
following string values: | Value | |:-------------------| @@ -1546,7 +1546,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1675,7 +1675,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1716,7 +1716,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1734,7 +1734,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1791,7 +1791,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1809,7 +1809,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1906,7 +1906,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: 
the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1924,7 +1924,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1985,7 +1985,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2003,7 +2003,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2033,7 +2033,7 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -2069,7 +2069,7 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2148,7 +2148,7 @@ The tolerations that will be added to the pods for the ingress module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2166,7 +2166,7 @@ The key of the toleration ### Constraints -**enum**: the value 
of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2236,7 +2236,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2254,7 +2254,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2343,12 +2343,13 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional | | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | ## .spec.distribution.modules.logging.loki.backend ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2448,6 +2449,12 @@ The cpu request for the prometheus pods The memory request for the opensearch pods +## .spec.distribution.modules.logging.loki.tsdbStartDate + +### Description + +The date loki have to switch to TSDB and schema v13 + ## .spec.distribution.modules.logging.minio ### Properties @@ -2492,7 +2499,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: 
+**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2510,7 +2517,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2595,7 +2602,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2613,7 +2620,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2691,7 +2698,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2740,7 +2747,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2758,7 +2765,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2808,7 +2815,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this 
property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2826,7 +2833,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2847,7 +2854,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2948,7 +2955,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2966,7 +2973,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3031,7 +3038,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3049,7 +3056,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3116,7 +3123,7 @@ The tolerations that will be added to the pods 
for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3134,7 +3141,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3166,7 +3173,7 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3249,7 +3256,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3267,7 +3274,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3330,7 +3337,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3348,7 +3355,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | 
|:---------| @@ -3425,7 +3432,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3443,7 +3450,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3626,7 +3633,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3677,7 +3684,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3695,7 +3702,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3751,7 +3758,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3769,7 +3776,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of 
this property must be equal to one of the following string values: | Value | |:---------| @@ -3824,7 +3831,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3842,7 +3849,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3891,7 +3898,7 @@ The enforcement action to use for the gatekeeper module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3939,7 +3946,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3957,7 +3964,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4027,7 +4034,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4045,7 +4052,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the 
following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4066,7 +4073,7 @@ The validation failure action to use for the kyverno module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -4110,7 +4117,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4128,7 +4135,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4149,7 +4156,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4212,7 +4219,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4230,7 +4237,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4307,7 +4314,7 @@ The tolerations that will be added to the pods for the 
monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4325,7 +4332,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4357,7 +4364,7 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4440,7 +4447,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4458,7 +4465,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4485,7 +4492,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4919,7 +4926,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------------| @@ -5032,7 +5039,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5111,7 +5118,7 @@ The type of the FW rule can be ingress or egress ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5190,7 +5197,7 @@ The type of the FW rule can be ingress or egress ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -5242,7 +5249,7 @@ The container runtime to use for the nodes ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -5285,7 +5292,7 @@ The size of the disk in GB ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------| @@ -5365,7 +5372,7 @@ AWS tags that will be added to the ASG and EC2 instances ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------------| @@ -5380,7 +5387,7 @@ Either 
`launch_configurations`, `launch_templates` or `both`. For new clusters u ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------------| @@ -5474,14 +5481,15 @@ Overrides the default IAM role name prefix for the EKS workers ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5489,6 +5497,12 @@ Overrides the default IAM role name prefix for the EKS workers The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
+ ## .spec.plugins.helm.releases.name ### Description @@ -5580,7 +5594,7 @@ The name of the kustomize plugin ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| @@ -5687,7 +5701,7 @@ This value defines in which region the bucket is located ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-----------------| diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 6118a1540..919eb6d5a 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -29,7 +29,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -137,7 +137,7 @@ The tolerations that will be added to the pods for all the KFD modules ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -155,7 +155,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -202,7 +202,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following 
string values: | Value | |:----------| @@ -410,7 +410,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -606,7 +606,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -624,7 +624,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -674,7 +674,7 @@ The tolerations that will be added to the pods for the auth module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -692,7 +692,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -788,7 +788,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -802,7 +802,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value 
| |:---------| @@ -912,7 +912,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -967,7 +967,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -985,7 +985,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1006,7 +1006,7 @@ The type of the DR, must be ***none*** or ***on-premises*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1032,7 +1032,7 @@ The storage backend type for Velero. 
`minio` will use an in-cluster MinIO deploy ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1119,7 +1119,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1137,7 +1137,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1265,7 +1265,7 @@ The type of the cluster issuer, must be ***http01*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1305,7 +1305,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1323,7 +1323,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1378,7 +1378,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following 
string values: | Value | |:-------------------| @@ -1396,7 +1396,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1457,7 +1457,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1475,7 +1475,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1505,7 +1505,7 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1541,7 +1541,7 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1620,7 +1620,7 @@ The tolerations that will be added to the pods for the ingress module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1638,7 +1638,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: 
+**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1708,7 +1708,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1726,7 +1726,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1815,12 +1815,13 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional | | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | ## .spec.distribution.modules.logging.loki.backend ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1920,6 +1921,12 @@ The cpu request for the prometheus pods The memory request for the opensearch pods +## .spec.distribution.modules.logging.loki.tsdbStartDate + +### Description + +The date when Loki will switch to TSDB and schema v13 + ## .spec.distribution.modules.logging.minio ### Properties @@ -1964,7 +1971,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the
following string values: | Value | |:-------------------| @@ -1982,7 +1989,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2067,7 +2074,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2085,7 +2092,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2163,7 +2170,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2212,7 +2219,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2230,7 +2237,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2280,7 +2287,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the 
value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2298,7 +2305,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2319,7 +2326,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2420,7 +2427,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2438,7 +2445,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2503,7 +2510,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2521,7 +2528,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2588,7 +2595,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of 
this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2606,7 +2613,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2638,7 +2645,7 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2721,7 +2728,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2739,7 +2746,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2802,7 +2809,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2820,7 +2827,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2897,7 +2904,7 @@ The tolerations that will be added to the 
pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2915,7 +2922,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3098,7 +3105,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3149,7 +3156,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3167,7 +3174,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3237,7 +3244,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3255,7 +3262,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | 
|:---------| @@ -3317,7 +3324,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3335,7 +3342,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3390,7 +3397,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3408,7 +3415,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3429,7 +3436,7 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3473,7 +3480,7 @@ The enforcement action to use for the gatekeeper module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3521,7 +3528,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the 
value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3539,7 +3546,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3609,7 +3616,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3627,7 +3634,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3648,7 +3655,7 @@ The validation failure action to use for the kyverno module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -3692,7 +3699,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3710,7 +3717,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3731,7 +3738,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Constraints -**enum**: the value of this property must be 
equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -3794,7 +3801,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3812,7 +3819,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3889,7 +3896,7 @@ The tolerations that will be added to the pods for the monitoring module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3907,7 +3914,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3939,7 +3946,7 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4022,7 +4029,7 @@ The tolerations that will be added to the pods for the cert-manager module ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4040,7 +4047,7 @@ The key of 
the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4067,7 +4074,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -4102,14 +4109,15 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -4117,6 +4125,12 @@ The type of tracing to use, either ***none*** or ***tempo*** The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### 
Description + +Disable running `helm diff` validation when installing the plugin; it will still be done when upgrading. + ## .spec.plugins.helm.releases.name ### Description diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index f620b0661..886fe3262 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -29,7 +29,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -149,7 +149,7 @@ An array with the tolerations that will be added to the pods for all the KFD mod ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -167,7 +167,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -214,7 +214,7 @@ The behavior of the configmap ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -422,7 +422,7 @@ The behavior of the secret ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -632,7 +632,7 @@ Set to override the tolerations that will be added to the pods of the package.
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -650,7 +650,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -827,7 +827,7 @@ Set to override the tolerations that will be added to the pods of the Auth modul ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -845,7 +845,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -941,7 +941,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -955,7 +955,7 @@ override default routes for KFD components ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1072,7 +1072,7 @@ The type of the Auth provider, options are: ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------| @@ -1135,7 +1135,7 @@ Set to override the tolerations that will be added to the 
pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1153,7 +1153,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1174,7 +1174,7 @@ The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disab ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1204,7 +1204,7 @@ The storage backend type for Velero. `minio` will use an in-cluster MinIO deploy ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1291,7 +1291,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1309,7 +1309,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1446,7 +1446,7 @@ The type of the clusterIssuer. 
Only `http01` challenge is supported for on-premi ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1486,7 +1486,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1504,7 +1504,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1559,7 +1559,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1577,7 +1577,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1638,7 +1638,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1656,7 +1656,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1686,7 +1686,7 @@ The provider of the TLS certificates for the ingresses, one of: `none`, `certMan ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------------| @@ -1737,7 +1737,7 @@ The type of the nginx ingress controller, options are: ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1820,7 +1820,7 @@ Set to override the tolerations that will be added to the pods of the Ingress mo ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1838,7 +1838,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -1918,7 +1918,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -1936,7 +1936,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2025,6 +2025,7 @@ This value defines where the output from the `systemdEtcd` Flow will be sent. Th | [backend](#specdistributionmoduleslogginglokibackend) | `string` | Optional | | [externalEndpoint](#specdistributionmoduleslogginglokiexternalendpoint) | `object` | Optional | | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | +| [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | ### Description @@ -2038,7 +2039,7 @@ The storage backend type for Loki. `minio` will use an in-cluster MinIO deployme ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2142,6 +2143,12 @@ The cpu request for the loki pods The memory request for the prometheus pods +## .spec.distribution.modules.logging.loki.tsdbStartDate + +### Description + +The date loki have to switch to TSDB and schema v13 + ## .spec.distribution.modules.logging.minio ### Properties @@ -2190,7 +2197,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2208,7 +2215,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2293,7 +2300,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2311,7 +2318,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2389,7 +2396,7 @@ The type of OpenSearch deployment. One of: `single` for a single replica or `tri ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2442,7 +2449,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2460,7 +2467,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2514,7 +2521,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2532,7 +2539,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2557,7 +2564,7 @@ Selects the logging stack. Options are: ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------------| @@ -2658,7 +2665,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2676,7 +2683,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2741,7 +2748,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2759,7 +2766,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2826,7 +2833,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2844,7 +2851,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -2880,7 +2887,7 @@ The storage backend type for Mimir. 
`minio` will use an in-cluster MinIO deploym ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2967,7 +2974,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -2985,7 +2992,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3052,7 +3059,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3070,7 +3077,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3151,7 +3158,7 @@ Set to override the tolerations that will be added to the pods of the module. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3169,7 +3176,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3352,7 +3359,7 @@ The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or ` ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:------------------| @@ -3403,7 +3410,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3421,7 +3428,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3499,7 +3506,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3517,7 +3524,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3587,7 +3594,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3605,7 +3612,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3660,7 +3667,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3678,7 +3685,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3699,7 +3706,7 @@ The type of CNI plugin to use, either `calico` (default, via the Tigera Operator ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3750,7 +3757,7 @@ The default enforcement action to use for the included constraints. `deny` will ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3798,7 +3805,7 @@ Set to override the tolerations that will be added to the pods of the package. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3816,7 +3823,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3890,7 +3897,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3908,7 +3915,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -3929,7 +3936,7 @@ The validation failure action to use for the policies, `Enforce` will block when ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:----------| @@ -3977,7 +3984,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -3995,7 +4002,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4016,7 +4023,7 @@ The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------| @@ -4087,7 +4094,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4105,7 +4112,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4186,7 +4193,7 @@ Set to override the tolerations that will be added to the pods of the module. ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4204,7 +4211,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4240,7 +4247,7 @@ The storage backend type for Tempo. `minio` will use an in-cluster MinIO deploym ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4327,7 +4334,7 @@ Set to override the tolerations that will be added to the pods of the package. 
### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -4345,7 +4352,7 @@ The key of the toleration ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:---------| @@ -4372,7 +4379,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:--------| @@ -5004,7 +5011,7 @@ Name for the node group. It will be also used as the node role label. It should ### Constraints -**enum**: the value of this property must be equal to one of the following values: +**enum**: the value of this property must be equal to one of the following string values: | Value | |:-------------------| @@ -5150,14 +5157,15 @@ The subnet CIDR to use for the Services network. 
### Properties -| Property | Type | Required | -|:-----------------------------------------------|:---------|:---------| -| [chart](#specpluginshelmreleaseschart) | `string` | Required | -| [name](#specpluginshelmreleasesname) | `string` | Required | -| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | -| [set](#specpluginshelmreleasesset) | `array` | Optional | -| [values](#specpluginshelmreleasesvalues) | `array` | Optional | -| [version](#specpluginshelmreleasesversion) | `string` | Optional | +| Property | Type | Required | +|:---------------------------------------------------------------------------------|:----------|:---------| +| [chart](#specpluginshelmreleaseschart) | `string` | Required | +| [disableValidationOnInstall](#specpluginshelmreleasesdisablevalidationoninstall) | `boolean` | Optional | +| [name](#specpluginshelmreleasesname) | `string` | Required | +| [namespace](#specpluginshelmreleasesnamespace) | `string` | Required | +| [set](#specpluginshelmreleasesset) | `array` | Optional | +| [values](#specpluginshelmreleasesvalues) | `array` | Optional | +| [version](#specpluginshelmreleasesversion) | `string` | Optional | ## .spec.plugins.helm.releases.chart @@ -5165,6 +5173,12 @@ The subnet CIDR to use for the Services network. The chart of the release +## .spec.plugins.helm.releases.disableValidationOnInstall + +### Description + +Disable running `helm diff` validation when installing the plugin, it will still be done when upgrading. 
+ ## .spec.plugins.helm.releases.name ### Description diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 5d3a616cb..366a89f5b 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -6,6 +6,8 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) // A Fury Cluster deployed through AWS's Elastic Kubernetes Service @@ -884,6 +886,9 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The date loki have to switch to TSDB and schema v13 + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } type SpecDistributionModulesLoggingLokiBackend string @@ -1784,786 +1789,871 @@ type SpecPluginsHelmReleasesElemSetElem struct { Value string `json:"value" yaml:"value" mapstructure:"value"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil -} +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) - } - *j = SpecDistributionModulesMonitoringType(v) - return nil + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["hostedZoneId"]; !ok || v == nil { - return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") - } - type Plain SpecDistributionModulesIngressClusterIssuerRoute53 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) - return nil + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") - } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesDr(plain) - return nil +type SpecToolsConfiguration struct { + // Terraform corresponds to the JSON schema field "terraform". + Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") - } - type Plain SpecDistributionModulesDrVelero - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesDrVelero(plain) - return nil +type SpecToolsConfigurationTerraform struct { + // State corresponds to the JSON schema field "state". + State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") - } - type Plain SpecDistributionModulesDrVeleroEks - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesDrVeleroEks(plain) - return nil +type SpecToolsConfigurationTerraformState struct { + // S3 corresponds to the JSON schema field "s3". + S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } -const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" +type SpecToolsConfigurationTerraformStateS3 struct { + // This value defines which bucket will be used to store all the states + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") - } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesMonitoring(plain) - return nil -} + // This value defines which folder will be used to store all the states inside the + // bucket + KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` -const ( - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" -) + // This value defines in which region the bucket is located + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", + // This value defines if the region of the bucket should be validated or not by + // Terraform, useful when using a bucket in a recently added region + SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) - } - *j = SpecDistributionModulesNetworkingType(v) - return nil -} +type TypesAwsArn string -const ( - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" -) +type TypesAwsIamRoleName string -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} +type TypesAwsIamRoleNamePrefix string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) - } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) - return nil -} +type TypesAwsIpProtocol string + +type TypesAwsRegion string const ( - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + TypesAwsRegionApNortheast1 TypesAwsRegion = 
"ap-northeast-1" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" ) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") - } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyGatekeeper(plain) - return nil -} +type TypesAwsS3BucketName string -const TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" +type TypesAwsS3BucketNamePrefix string -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", +type TypesAwsS3KeyPrefix string + +type TypesAwsSshPubKey string + +type TypesAwsSubnetId string + +type TypesAwsTags map[string]string + +type TypesAwsVpcId string + +type TypesCidr string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". 
+ IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the dr module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesIpAddress string + +type TypesKubeLabels map[string]string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeNodeSelector map[string]string + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". 
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The cpu limit for the opensearch pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeTaints []string + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +type TypesKubeTolerationEffect string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" +) + +type TypesKubeTolerationEffect_1 string + +const ( + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" +) + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +type TypesKubeTolerationOperator_1 string + +const ( + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". 
+ Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + +type TypesUri string + +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", +} + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + 
"Enforce", +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && 
len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } -const ( - TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" - TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" - TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } -const TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" - -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } -const ( - TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" - TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" - TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" - TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecDistributionModulesPolicy + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } -const ( - TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" - TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" - TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" -) - -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -const ( - TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" - TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" - TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" - TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" - TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" -) - -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - *j = SpecDistributionModulesTracingType(v) - return nil -} - -const ( - TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" - TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" -) - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - *j = TypesAwsRegion(v) + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesTracing + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") - } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") - } - type Plain SpecDistributionModules + type Plain SpecKubernetesNodePoolAdditionalFirewallRules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } -type TypesAwsRegion string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistribution + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistribution(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } -type TypesCidr string - -type TypesAwsS3BucketName string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["id"]; !ok || v == nil { + return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["owner"]; !ok || v == nil { + return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + type Plain SpecKubernetesNodePoolAmi var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecKubernetesNodePoolAmi(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) - } - *j = SpecDistributionModulesDrType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") - } - type Plain SpecInfrastructureVpcNetwork - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpc - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecInfrastructureVpc(plain) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } -type TypesAwsS3BucketNamePrefix string - -type TypesTcpPort int - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { - return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") - } - if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { - return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") - } - if v, ok := raw["loadBalancerController"]; !ok || v == nil { - return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") - } - if v, ok := raw["overrides"]; !ok || v == nil { - return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesAws + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAws(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecInfrastructureVpnSsh + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesAuth(plain) return nil } -type TypesAwsVpcId string - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpn - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecInfrastructureVpn(plain) + *j = SpecKubernetesLogsTypesElem(v) return nil } -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") - } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesAPIServer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") } - type Plain SpecDistributionModulesAwsLoadBalancerController + type Plain SpecDistributionModulesAwsClusterAutoscaler var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAwsLoadBalancerController(plain) + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = 
SpecKubernetesAwsAuthUser(plain) return nil } @@ -2586,1209 +2676,1115 @@ func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionModulesAwsClusterAutoscaler + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } -type TypesAwsIamRoleNamePrefix string - -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + type Plain SpecDistributionModulesAwsLoadBalancerController + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecDistributionModulesAwsLoadBalancerController(plain) return nil } -type TypesAwsIamRoleName string - -type TypesAwsArn string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - type Plain SpecDistributionModulesAuth + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecKubernetesAPIServer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModulesAuthProvider + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + } + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") } - *j = SpecDistributionModulesAuthProviderType(v) + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } -type TypesAwsIpProtocol string - -type TypesAwsTags map[string]string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { + return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { + return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + if v, ok := raw["loadBalancerController"]; !ok || v == nil { + return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") + } + if v, ok := raw["overrides"]; !ok || v == nil { + return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + } + type Plain SpecDistributionModulesAws var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionModulesAws(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecKubernetesNodePoolType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecInfrastructureVpc(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesAuthDex + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + return fmt.Errorf("field name in SpecKubernetesNodePool: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecKubernetesNodePool(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionModulesDrType(v) return nil } -const TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - -type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states - BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" 
mapstructure:"bucketName"` - - // This value defines which folder will be used to store all the states inside the - // bucket - KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - - // This value defines in which region the bucket is located - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` - - // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region - SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecDistributionModulesIngressCertManager + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["vpcId"]; !ok || v == nil { - return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecDistribution(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - type Plain SpecDistributionModulesIngressDNSPublic + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModules(plain) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecKubernetes(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") - } - if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionModulesIngressExternalDNS + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressExternalDNS(plain) + *j = SpecDistributionModulesTracing(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + *j = SpecPluginsHelmReleasesElemSetElem(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = TypesAwsRegion(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") - } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePoolAmi - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecKubernetesNodePoolAmi(plain) + *j = SpecDistributionModulesTracingType(v) return nil } +var 
enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) return nil } -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecDistributionModulesPolicyType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + 
type Plain SpecToolsConfigurationTerraformStateS3 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + type Plain SpecToolsConfigurationTerraformState + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecToolsConfigurationTerraformState(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + } + type Plain SpecToolsConfigurationTerraform + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraform(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return 
fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecToolsConfiguration(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["certManager"]; !ok || v == nil { - return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") - } - if v, ok := raw["externalDns"]; !ok || v == nil { - return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - type Plain SpecKubernetesNodePoolInstance + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } -type TypesKubeLabels_1 map[string]string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = 
SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") - } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - type Plain SpecKubernetesNodePoolSize + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = TypesKubeToleration(plain) return nil } -type TypesAwsSubnetId string - -type TypesKubeTaints []string - -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecDistributionModulesNetworkingType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = TypesKubeTolerationOperator(v) return nil } +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecKubernetesNodePool + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecDistributionModulesDr(plain) return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - -type TypesKubeLabels map[string]string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + if v, ok := raw["hostedZoneId"]; !ok || v == nil { + return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - type Plain SpecKubernetes + type Plain SpecDistributionModulesIngressClusterIssuerRoute53 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetes(plain) + *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in 
SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil } -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = TypesKubeTolerationEffect_1(v) return nil } -type TypesAwsS3KeyPrefix string - // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -3814,436 +3810,404 @@ func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } -type SpecToolsConfigurationTerraformState struct { - // S3 corresponds to the JSON schema field "s3". - S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecToolsConfigurationTerraformState + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["vpcId"]; !ok || v == nil { + return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } -type SpecToolsConfigurationTerraform struct { - // State corresponds to the JSON schema field "state". - State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecToolsConfigurationTerraform + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraform(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } -type SpecToolsConfiguration struct { - // Terraform corresponds to the JSON schema field "terraform". - Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + } + *j = TypesKubeTolerationOperator_1(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") } - type Plain SpecToolsConfiguration + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + type Plain SpecDistributionModulesIngressExternalDNS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfiguration(plain) + *j = SpecDistributionModulesIngressExternalDNS(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain Spec + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + return fmt.Errorf("field key in TypesKubeToleration_1: required") } - type Plain TypesKubeToleration + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) - return nil -} - -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) - } - *j = TypesKubeTolerationOperator(v) - return nil -} - -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) + *j = TypesKubeToleration_1(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeTolerationEffect_1 string - -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ +var enumValues_TypesKubeTolerationEffect = []interface{}{ "NoSchedule", "PreferNoSchedule", "NoExecute", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -const ( - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" -) - -type TypesKubeTolerationOperator_1 string - -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + } + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomerium_2(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } -const ( - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". - Value string `json:"value" yaml:"value" mapstructure:"value"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") + if v, ok := raw["certManager"]; !ok || v == nil { + return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + if v, ok := raw["externalDns"]; !ok || v == nil { + return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") } - type Plain TypesKubeToleration_1 + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecDistributionModulesIngress(plain) return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector 
corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = 
SpecDistributionModulesLoggingCustomOutputs(plain) + return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + } + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLogging(plain) + return nil } -type TypesKubeTolerationEffect string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } -type TypesAwsSshPubKey string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesIpAddress string - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesUri string - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4262,8 +4226,24 @@ func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { return nil } -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
@@ -4286,7 +4266,25 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeNodeSelector map[string]string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 8d2d43563..76248e752 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -6,6 +6,8 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) // A Fury Cluster deployed through AWS's Elastic Kubernetes Service @@ -844,6 +846,9 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". 
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The date loki have to switch to TSDB and schema v13 + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } type SpecDistributionModulesLoggingLokiBackend string @@ -1814,713 +1819,482 @@ const ( TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" ) -type TypesAwsS3BucketName string - -type TypesAwsS3BucketNamePrefix string - -type TypesAwsS3KeyPrefix string - -type TypesAwsSshPubKey string - -type TypesAwsSubnetId string - -type TypesAwsTags map[string]string - -type TypesAwsVpcId string - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + } + *j = SpecKubernetesNodePoolInstanceVolumeType(v) + return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". 
- NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil } -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesIpAddress string - -type TypesKubeLabels map[string]string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeNodeSelector map[string]string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil +} -type TypesKubeNodeSelector_1 map[string]string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil +} -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil +} - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -type TypesKubeTaints []string - -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -type TypesKubeTolerationEffect string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" -) - -type TypesKubeTolerationEffect_1 string - -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -type TypesKubeTolerationOperator_1 string - -const ( - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesTcpPort int - -type TypesUri string - -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} - -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - 
"none", - "gatekeeper", - "kyverno", +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionCommonProvider + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCommonProvider(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - // 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesTracing(plain) return nil } +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - if v, ok := raw["sourceSecurityGroupId"]; !ok || 
v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModules(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = 
SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecDistribution(plain) return nil } +type TypesCidr string + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecDistributionModulesAuth + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") - } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) - } - *j = SpecKubernetesLogsTypesElem(v) - return nil -} - -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) - } - *j = SpecKubernetesNodePoolContainerRuntime(v) - return nil -} - -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) - } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecDistributionModulesLoggingType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecInfrastructureVpc(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") - } - type Plain SpecKubernetesAwsAuthRole - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAwsAuthRole(plain) - return nil +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecKubernetesAPIServer + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAPIServer(plain) - return nil -} - -var enumValues_TypesAwsRegion = 
[]interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) } - *j = TypesAwsRegion(v) + *j = SpecInfrastructureVpnSsh(plain) return nil } +type TypesAwsVpcId string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecKubernetesNodePoolInstance + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } @@ -2546,506 +2320,629 @@ func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") - } - type Plain SpecInfrastructureVpnSsh - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", 
enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetesAPIServer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - type Plain SpecInfrastructureVpcNetwork + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +type TypesKubeResourcesLimits struct { + // The cpu limit for the opensearch pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + } + *j = SpecKubernetesLogsTypesElem(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistribution + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistribution(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + *j = SpecDistributionModulesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - type Plain SpecDistributionModules + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesIngressNginx(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecKubernetesNodePool + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecDistributionModulesTracing + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) - } - *j = SpecKubernetesNodePoolsLaunchKind(v) - return nil -} +type TypesAwsTags map[string]string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecKubernetes + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetes(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecDistributionModulesPolicy + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + if v, ok := raw["protocol"]; !ok 
|| v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return 
err } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionModulesDr(plain) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. @@ -3069,950 +2966,1102 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") - } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecKubernetesNodePoolSize + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecToolsConfigurationTerraformState + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) - } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) - return nil -} +const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecToolsConfigurationTerraform + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + 
type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraform(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } +const TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecKubernetesNodePoolAdditionalFirewallRules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) 
< 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } +const TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + if v, ok := raw["id"]; !ok || v == nil { + return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") } - type Plain SpecToolsConfiguration + if v, ok := raw["owner"]; !ok || v == nil { + return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + } + type Plain SpecKubernetesNodePoolAmi var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfiguration(plain) + *j = SpecKubernetesNodePoolAmi(plain) return nil } +const TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } +const ( + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" +) + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain Spec + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } +const ( + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecDistributionModulesDr(plain) + *j = TypesAwsRegion(v) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain TypesKubeToleration + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } +type TypesAwsS3BucketName string + +type TypesKubeLabels_1 map[string]string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + } + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } +type TypesAwsSubnetId string + +type TypesKubeTaints []string + // UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesDrType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") - } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesMonitoring(plain) - return nil +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = TypesKubeTolerationOperator(v) + *j = SpecKubernetesNodePoolType(v) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the dr module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + return fmt.Errorf("field name in SpecKubernetesNodePool: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecKubernetesNodePool(plain) return nil } +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = TypesKubeTolerationEffect(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in 
SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecKubernetes(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) - return nil -} - -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) - } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") } - *j = SpecDistributionModulesMonitoringType(v) + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecDistributionModulesAuthDex + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", -} +type TypesAwsS3KeyPrefix string // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + } + type Plain SpecToolsConfigurationTerraformStateS3 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + type Plain SpecToolsConfigurationTerraformState + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecToolsConfigurationTerraformState(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain TypesKubeToleration_1 + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecToolsConfigurationTerraform(plain) return nil } +type TypesKubeLabels map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecToolsConfiguration(plain) return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + return fmt.Errorf("field kubernetes in Spec: required") } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type 
Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = TypesKubeToleration(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` } +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = TypesKubeTolerationOperator(v) return nil } +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return 
fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + 
break + } } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = TypesKubeTolerationEffect_1(v) return nil } +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = TypesKubeTolerationOperator_1(v) return nil } +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". 
+ Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - type Plain SpecDistributionModulesLoggingOpensearch + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = TypesKubeToleration_1(plain) return nil } +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". 
+ Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } +type TypesAwsSshPubKey string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesUri string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecDistributionCommonProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionCommonProvider(plain) return nil } +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { var v string @@ -4033,23 +4082,7 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil -} +type TypesKubeNodeSelector map[string]string // UnmarshalJSON implements json.Unmarshaler. 
func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 9a45ff86a..0cb617b8c 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -6,8 +6,88 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" ) +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionCommonProvider struct { + // The type of the provider + Type string `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *KfddistributionKfdV1Alpha2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["apiVersion"]; !ok || v == nil { + return fmt.Errorf("field apiVersion in KfddistributionKfdV1Alpha2: required") + } + if v, ok := raw["kind"]; !ok || v == nil { + return fmt.Errorf("field kind in KfddistributionKfdV1Alpha2: required") + } + if v, ok := raw["metadata"]; !ok || v == nil { + return fmt.Errorf("field metadata in KfddistributionKfdV1Alpha2: required") + } + if v, ok := raw["spec"]; !ok || v == nil { + return fmt.Errorf("field spec in KfddistributionKfdV1Alpha2: required") + } + type Plain KfddistributionKfdV1Alpha2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = KfddistributionKfdV1Alpha2(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} + type KfddistributionKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -22,99 +102,306 @@ type KfddistributionKfdV1Alpha2 struct { Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` } -type KfddistributionKfdV1Alpha2Kind string - const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" -type Metadata struct { - // Name corresponds to the JSON schema field "name". 
- Name string `json:"name" yaml:"name" mapstructure:"name"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil } -type Spec struct { - // Distribution corresponds to the JSON schema field "distribution". - Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - - // DistributionVersion corresponds to the JSON schema field "distributionVersion". - DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` - - // Plugins corresponds to the JSON schema field "plugins". - Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *KfddistributionKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_KfddistributionKfdV1Alpha2Kind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_KfddistributionKfdV1Alpha2Kind, v) + } + *j = KfddistributionKfdV1Alpha2Kind(v) + return nil } -type SpecDistribution struct { - // Common corresponds to the JSON schema field "common". - Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` - - // CustomPatches corresponds to the JSON schema field "customPatches". 
- CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` - - // The kubeconfig file path - Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` +type KfddistributionKfdV1Alpha2Kind string - // Modules corresponds to the JSON schema field "modules". - Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + } + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomerium_2(plain) + return nil } -type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Provider corresponds to the JSON schema field "provider". - Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` +// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. +type SpecDistributionModulesAuthPomerium_2 struct { + // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". + DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` - // URL of the registry where to pull images from for the Distribution phase. 
- // (Default is registry.sighup.io/fury). - // - // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for the plugin too. - Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The relative path to the vendor directory, does not need to be changed - RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` + // DEPRECATED: Use defaultRoutesPolicy and/or routes + Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} + // Additional routes configuration for Pomerium. Follows Pomerium's route format: + // https://www.pomerium.com/docs/reference/routes + Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` -type SpecDistributionCommonProvider struct { - // The type of the provider - Type string `json:"type" yaml:"type" mapstructure:"type"` + // Secrets corresponds to the JSON schema field "secrets". 
+ Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` } -type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource - -type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { - // The behavior of the configmap - Behavior *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` - - // The envs of the configmap - Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` - - // The files of the configmap - Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` - - // The literals of the configmap - Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` - - // The name of the configmap - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the configmap - Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` +type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} - // Options corresponds to the JSON schema field "options". - Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil } -type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration_1(plain) + return nil +} -const ( +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + } + *j = TypesKubeTolerationOperator_1(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + } + *j = TypesKubeTolerationEffect_1(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil +} + +type SpecDistributionCommon struct { + // The node selector to use to place the pods for all the KFD modules + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` + + // URL of the registry where to pull images from for the Distribution phase. + // (Default is registry.sighup.io/fury). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. 
+ Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // The relative path to the vendor directory, does not need to be changed + RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` + + // The tolerations that will be added to the pods for all the KFD modules + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +const ( SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" ) +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. +type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. + // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` + + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` + + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. 
 It's critical that secret keys are random,
+	// and stored safely.
+	//
+	// To generate a key, run the following command: `head -c32 /dev/urandom | base64`
+	SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"`
+
+	// Signing Key is the base64 representation of one or more PEM-encoded private
+	// keys used to sign a user's attestation JWT, which can be consumed by upstream
+	// applications to pass along identifying user information like username, id, and
+	// groups.
+	//
+	// To generate a P-256 (ES256) signing key:
+	//
+	// ```bash
+	// openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem
+	// # careful! this will output your private key in terminal
+	// cat ec_private.pem | base64
+	// ```
+	SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"`
+}
+
 type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct {
 	// The annotations of the configmap
 	Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"`
- Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + // The envs of the configmap + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` - // The patch content - Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + // The files of the configmap + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` - // The path of the patch - Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + // The literals of the configmap + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` - // Target corresponds to the JSON schema field "target". - Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` + // The name of the configmap + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the configmap + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". + Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource + +// Each entry should follow the format of Kustomize's images patch +type SpecDistributionCustomPatchesImages []map[string]interface{} + type SpecDistributionCustomPatchesPatchOptions struct { // If true, the kind change will be allowed AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` @@ -177,46 +493,91 @@ type SpecDistributionCustomPatchesPatchTarget struct { Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". + Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". 
+ Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch // Each entry should be either a relative file path or an inline content resolving // to a partial or complete resource definition type SpecDistributionCustomPatchesPatchesStrategicMerge []string -type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string -type SpecDistributionCustomPatchesSecretGeneratorResource struct { - // The behavior of the secret - Behavior *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` +// override default routes for KFD components +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` - // The envs of the secret - Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` - // The files of the secret - Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". 
+ IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` - // The literals of the secret - Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` - // The name of the secret - Name string `json:"name" yaml:"name" mapstructure:"name"` + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". + LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` - // The namespace of the secret - Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". + MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` - // Options corresponds to the JSON schema field "options". - Options *SpecDistributionCustomPatchesSecretGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". 
+ MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` - // The type of the secret - Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". + TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` } -type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} const ( SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" ) type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { @@ -233,6 +594,52 @@ type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` } +type SpecDistributionCustomPatchesSecretGeneratorResource struct { + // The behavior of the secret + Behavior *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the secret + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the secret + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the secret + 
Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the secret + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the secret + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". + Options *SpecDistributionCustomPatchesSecretGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The type of the secret + Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil +} + +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + type SpecDistributionCustompatches struct { // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". 
ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` @@ -251,48 +658,9 @@ type SpecDistributionCustompatches struct { SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` } -type SpecDistributionModules struct { - // Auth corresponds to the JSON schema field "auth". - Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - - // Dr corresponds to the JSON schema field "dr". - Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` - - // Ingress corresponds to the JSON schema field "ingress". - Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` - - // Logging corresponds to the JSON schema field "logging". - Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` - - // Monitoring corresponds to the JSON schema field "monitoring". - Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` - - // Networking corresponds to the JSON schema field "networking". - Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` - - // Policy corresponds to the JSON schema field "policy". - Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` - - // Tracing corresponds to the JSON schema field "tracing". 
- Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` -} - -type SpecDistributionModulesAuth struct { - // The base domain for the auth module - BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` - - // Dex corresponds to the JSON schema field "dex". - Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Pomerium corresponds to the JSON schema field "pomerium". - Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} - // Provider corresponds to the JSON schema field "provider". - Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} type SpecDistributionModulesAuthDex struct { // The additional static clients for dex @@ -308,23 +676,22 @@ type SpecDistributionModulesAuthDex struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesAuthDexExpiry struct { - // Dex ID tokens expiration time duration (default 24h). - IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` - - // Dex signing key expiration time duration (default 6h). 
- SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` -} - -type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the auth module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the auth module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil } type SpecDistributionModulesAuthOverridesIngress struct { @@ -335,125 +702,100 @@ type SpecDistributionModulesAuthOverridesIngress struct { IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } -type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} -type SpecDistributionModulesAuthPomerium interface{} +type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress -// override default routes for KFD components -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { - // GatekeeperPolicyManager corresponds to the JSON schema field - // "gatekeeperPolicyManager". - GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` +type SpecDistributionModulesAuthOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // HubbleUi corresponds to the JSON schema field "hubbleUi". 
- HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + // The node selector to use to place the pods for the auth module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // IngressNgnixForecastle corresponds to the JSON schema field - // "ingressNgnixForecastle". - IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + // The tolerations that will be added to the pods for the auth module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} - // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". - LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` +type SpecDistributionModulesAuthPomerium interface{} - // LoggingOpensearchDashboards corresponds to the JSON schema field - // "loggingOpensearchDashboards". - LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for the basic auth + Password string `json:"password" yaml:"password" mapstructure:"password"` - // MonitoringAlertmanager corresponds to the JSON schema field - // "monitoringAlertmanager". 
- MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + // The username for the basic auth + Username string `json:"username" yaml:"username" mapstructure:"username"` +} - // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". - MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} - // MonitoringMinioConsole corresponds to the JSON schema field - // "monitoringMinioConsole". - MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` +type SpecDistributionModulesAuthProviderType string - // MonitoringPrometheus corresponds to the JSON schema field - // "monitoringPrometheus". 
- MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} - // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". - TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` -} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} - -// Pomerium needs some user-provided secrets to be fully configured. These secrets -// should be unique between clusters. 
-type SpecDistributionModulesAuthPomeriumSecrets struct { - // Cookie Secret is the secret used to encrypt and sign session cookies. - // - // To generate a random key, run the following command: `head -c32 /dev/urandom | - // base64` - COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` - - // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth - // type is SSO, this value will be the secret used to authenticate Pomerium with - // Dex, **use a strong random value**. - IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` - - // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate - // requests between Pomerium services. It's critical that secret keys are random, - // and stored safely. - // - // To generate a key, run the following command: `head -c32 /dev/urandom | base64` - SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` - - // Signing Key is the base64 representation of one or more PEM-encoded private - // keys used to sign a user's attestation JWT, which can be consumed by upstream - // applications to pass along identifying user information like username, id, and - // groups. - // - // To generates an P-256 (ES256) signing key: - // - // ```bash - // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem - // # careful! this will output your private key in terminal - // cat ec_private.pem | base64 - // ``` - SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil } -// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. -type SpecDistributionModulesAuthPomerium_2 struct { - // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". - DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // DEPRECATED: Use defaultRoutesPolicy and/or routes - Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` - - // Additional routes configuration for Pomerium. Follows Pomerium's route format: - // https://www.pomerium.com/docs/reference/routes - Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` - - // Secrets corresponds to the JSON schema field "secrets". 
- Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` -} +const ( + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". @@ -463,61 +805,121 @@ type SpecDistributionModulesAuthProvider struct { Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth - Password string `json:"password" yaml:"password" mapstructure:"password"` - - // The username for the basic auth - Username string `json:"username" yaml:"username" mapstructure:"username"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil } -type SpecDistributionModulesAuthProviderType string +type SpecDistributionModulesAuth struct { + // The base domain for the auth module + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` -const ( - SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" - SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" - SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" -) + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` -type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** - Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` + // Pomerium corresponds to the JSON schema field "pomerium". 
+ Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` - // Velero corresponds to the JSON schema field "velero". - Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil } +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} + type SpecDistributionModulesDrType string +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + const ( SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) -type SpecDistributionModulesDrVelero struct { - // The storage backend type for Velero. `minio` will use an in-cluster MinIO - // deployment for object storage, `externalEndpoint` can be used to point to an - // external S3-compatible object storage instead of deploying an in-cluster MinIO. - Backend *SpecDistributionModulesDrVeleroBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // Configuration for Velero's external storage backend. - ExternalEndpoint *SpecDistributionModulesDrVeleroExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +type SpecDistributionModulesDrVeleroBackend string - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - // Configuration for Velero's backup schedules. - Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + } + *j = SpecDistributionModulesDrVeleroBackend(v) + return nil } -type SpecDistributionModulesDrVeleroBackend string - const ( - SpecDistributionModulesDrVeleroBackendExternalEndpoint SpecDistributionModulesDrVeleroBackend = "externalEndpoint" SpecDistributionModulesDrVeleroBackendMinio SpecDistributionModulesDrVeleroBackend = "minio" + SpecDistributionModulesDrVeleroBackendExternalEndpoint SpecDistributionModulesDrVeleroBackend = "externalEndpoint" ) // Configuration for Velero's external storage backend. @@ -538,6 +940,16 @@ type SpecDistributionModulesDrVeleroExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Velero's schedules cron. +type SpecDistributionModulesDrVeleroSchedulesCron struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // The cron expression for the `manifests` backup schedule (default `*/15 * * * + // *`). + Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + // Configuration for Velero's backup schedules. type SpecDistributionModulesDrVeleroSchedules struct { // Configuration for Velero's schedules cron. 
@@ -553,43 +965,77 @@ type SpecDistributionModulesDrVeleroSchedules struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -// Configuration for Velero's schedules cron. -type SpecDistributionModulesDrVeleroSchedulesCron struct { - // The cron expression for the `full` backup schedule (default `0 1 * * *`). - Full *string `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` +type SpecDistributionModulesDrVelero struct { + // The storage backend type for Velero. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesDrVeleroBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // The cron expression for the `manifests` backup schedule (default `*/15 * * * - // *`). - Manifests *string `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` -} + // Configuration for Velero's external storage backend. + ExternalEndpoint *SpecDistributionModulesDrVeleroExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` -type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone - BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // CertManager corresponds to the JSON schema field "certManager". 
- CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` + // Configuration for Velero's backup schedules. + Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +} - // Forecastle corresponds to the JSON schema field "forecastle". - Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // Configurations for the nginx ingress controller module - Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` + // The type of the DR, must be ***none*** or ***on-premises*** + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` } -type SpecDistributionModulesIngressCertManager struct { - // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". - ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type SpecDistributionModulesIngressCertManagerClusterIssuerType string + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil } +const SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" + type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email of the cluster issuer Email string `json:"email" yaml:"email" mapstructure:"email"` @@ -604,42 +1050,86 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" 
yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionModulesIngressCertManagerClusterIssuerType string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil +} -const SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" +type SpecDistributionModulesIngressCertManager struct { + // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". + ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` -type SpecDistributionModulesIngressForecastle struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesIngressNginx struct { +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} + +type SpecDistributionModulesIngressForecastle struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tls corresponds to the JSON schema field "tls". - Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** - Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** - Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +type SpecDistributionModulesIngressNginxTLSProvider string - // Secret corresponds to the JSON schema field "secret". - Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} -type SpecDistributionModulesIngressNginxTLSProvider string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} const ( SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" - SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" ) type SpecDistributionModulesIngressNginxTLSSecret struct { @@ -654,60 +1144,193 @@ type SpecDistributionModulesIngressNginxTLSSecret struct { Key string `json:"key" yaml:"key" mapstructure:"key"` } -type SpecDistributionModulesIngressNginxType string - -const ( - SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" - SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" - SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" -) - -type SpecDistributionModulesIngressOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} - // The node selector to use to place the pods for the ingress module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificate, must be ***none***, ***certManager*** or + // ***secret*** + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` - // The tolerations that will be added to the pods for the ingress module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // Secret corresponds to the JSON schema field "secret". + Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` } -type SpecDistributionModulesIngressOverridesIngresses struct { - // Forecastle corresponds to the JSON schema field "forecastle". 
- Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil } -type SpecDistributionModulesLogging struct { - // Cerebro corresponds to the JSON schema field "cerebro". - Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` - - // CustomOutputs corresponds to the JSON schema field "customOutputs". - CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` - - // Loki corresponds to the JSON schema field "loki". - Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Opensearch corresponds to the JSON schema field "opensearch". - Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` +type SpecDistributionModulesIngressNginxType string - // Operator corresponds to the JSON schema field "operator". 
- Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *Spec) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") + } + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + type Plain Spec + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) + return nil +} - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an - // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. 
Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. - Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` +const ( + SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" + SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" +) + +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` + + // The type of the nginx ingress controller, must be ***none***, ***single*** or + // ***dual*** + Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} + +type SpecDistributionModulesIngressOverridesIngresses struct { + // Forecastle corresponds to the JSON schema field "forecastle". 
+ Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +} + +type SpecDistributionModulesIngressOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the ingress module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the ingress module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesIngress struct { + // the base domain used for all the KFD ingresses, if in the nginx dual + // configuration, it should be the same as the + // .spec.distribution.modules.ingress.dns.private.name zone + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` + + // CertManager corresponds to the JSON schema field "certManager". + CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` + + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // Configurations for the nginx ingress controller module + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil } type SpecDistributionModulesLoggingCerebro struct { @@ -768,22 +1391,81 @@ type SpecDistributionModulesLoggingCustomOutputs struct { SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } -type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". - Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +type SpecDistributionModulesLoggingLokiBackend string - // Resources corresponds to the JSON schema field "resources". 
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". + Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // DistributionVersion corresponds to the JSON schema field "distributionVersion". + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` } -type SpecDistributionModulesLoggingLokiBackend string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} const ( - SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" ) type SpecDistributionModulesLoggingLokiExternalEndpoint struct { @@ -803,31 +1485,175 @@ type SpecDistributionModulesLoggingLokiExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -type 
SpecDistributionModulesLoggingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The PVC size for each minio disk, 6 disks total - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecPluginsHelmReleasesElemSetElem(plain) + return nil } -type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username of the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["kubeconfig"]; !ok || v == nil { + return fmt.Errorf("field kubeconfig in SpecDistribution: required") + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistribution(plain) + return nil } -type SpecDistributionModulesLoggingOpensearch struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type SpecDistribution struct { + // Common corresponds to the JSON schema field "common". + Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + // CustomPatches corresponds to the JSON schema field "customPatches". + CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + + // The kubeconfig file path + Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` + + // Modules corresponds to the JSON schema field "modules". + Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +} + +type SpecDistributionModulesLoggingLoki struct { + // Backend corresponds to the JSON schema field "backend". 
+ Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The date loki have to switch to TSDB and schema v13 + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + } + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingLoki(plain) + return nil +} + +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password of the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username of the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesLoggingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each minio disk, 6 disks total + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesLoggingOpensearchType string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") + } + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") + } + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} + +type Metadata struct { + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +const SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" + +type SpecDistributionModulesLoggingOpensearch struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` // The storage size for the opensearch pods StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` @@ -836,76 +1662,113 @@ type SpecDistributionModulesLoggingOpensearch struct { Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecDistributionModulesLoggingOpensearchType string - -const ( - SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" - SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" -) +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil +} type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesLoggingType string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). + IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` + + // Dex signing key expiration time duration (default 6h). 
+ SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` +} const ( - SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" - SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" - SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" ) -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` +type SpecDistributionModulesLogging struct { + // Cerebro corresponds to the JSON schema field "cerebro". + Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". - BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + // CustomOutputs corresponds to the JSON schema field "customOutputs". 
+ CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + // Loki corresponds to the JSON schema field "loki". + Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + // Opensearch corresponds to the JSON schema field "opensearch". + Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + // Operator corresponds to the JSON schema field "operator". + Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. - // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + // selects the logging stack. Choosing none will disable the centralized logging. + // Choosing opensearch will deploy and configure the Logging Operator and an + // OpenSearch cluster (can be single or triple for HA) where the logs will be + // stored. 
Choosing loki will use a distributed Grafana Loki instead of OpenSearh + // for storage. Choosing customOuput the Logging Operator will be deployed and + // installed but with no local storage, you will have to create the needed Outputs + // and ClusterOutputs to ship the logs to your desired storage. + Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` +} - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + } + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLogging(plain) + return nil } type SpecDistributionModulesMonitoringAlertManager struct { @@ -955,25 +1818,57 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` +type SpecDistributionModulesMonitoringMimirBackend string - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
- ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // Dr corresponds to the JSON schema field "dr". + Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". + Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". 
+ Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } -type SpecDistributionModulesMonitoringMimirBackend string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + } + *j = SpecDistributionModulesMonitoringMimirBackend(v) + return nil +} const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" ) type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { @@ -993,15 +1888,18 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -type SpecDistributionModulesMonitoringMinio struct { +type SpecDistributionModulesMonitoringMimir struct { + // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
+ ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + // The retention time for the mimir pods + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { @@ -1012,6 +1910,19 @@ type SpecDistributionModulesMonitoringMinioRootUser struct { Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". 
+ RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The storage size for the minio pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + type SpecDistributionModulesMonitoringPrometheus struct { // Set this option to ship the collected metrics to a remote Prometheus receiver. // @@ -1035,6 +1946,8 @@ type SpecDistributionModulesMonitoringPrometheus struct { StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + type SpecDistributionModulesMonitoringPrometheusAgent struct { // Set this option to ship the collected metrics to a remote Prometheus receiver. // @@ -1049,17 +1962,51 @@ type SpecDistributionModulesMonitoringPrometheusAgent struct { Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` } -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} +type SpecDistributionModulesMonitoringType string -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + } + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesTracing(plain) + return nil +} -type SpecDistributionModulesMonitoringType string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" ) type SpecDistributionModulesMonitoringX509Exporter struct { @@ -1067,112 +2014,75 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". 
- Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of networking to use, either ***none***, ***calico*** or ***cilium*** - Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesNetworkingCilium struct { - // MaskSize corresponds to the JSON schema field "maskSize". - MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // PodCidr corresponds to the JSON schema field "podCidr". - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` -} +// configuration for the Monitoring module components +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". 
+ BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` -type SpecDistributionModulesNetworkingType string + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" - SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" -) + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". + PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + // The type of the monitoring, must be ***none***, ***prometheus***, + // ***prometheusAgent*** or ***mimir***. + // + // - `none`: will disable the whole monitoring stack. 
+ // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus + // in Agent mode (no alerting, no queries, no storage), and all the exporters + // needed to get metrics for the status of the cluster and the workloads. Useful + // when having a centralized (remote) Prometheus where to ship the metrics and not + // storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, and in addition + // Grafana Mimir that allows for longer retention of metrics and the usage of + // Object Storage. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // X509Exporter corresponds to the JSON schema field "x509Exporter". 
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` } -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil } -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - type SpecDistributionModulesTracing struct { // Minio corresponds to the JSON schema field "minio". Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` @@ -1187,934 +2097,525 @@ type SpecDistributionModulesTracing struct { Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". 
- RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +type SpecDistributionModulesNetworkingCilium struct { + // MaskSize corresponds to the JSON schema field "maskSize". + MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". 
- Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // Disable running `helm diff` validation when installing the plugin, it will - // still be done when upgrading. - DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". 
- Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". 
- Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the security module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // PodCidr corresponds to the JSON schema field "podCidr". + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["maskSize"]; !ok || v == nil { + return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") } - 
type Plain SpecDistributionModules + type Plain SpecDistributionModulesNetworkingCilium var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesNetworkingCilium(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +type SpecDistributionModulesNetworkingType string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesTracingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesTracingType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesMonitoring - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesNetworkingType(v) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +const ( + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) + +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". 
+ TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of networking to use, either ***none***, ***calico*** or ***cilium*** + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesNetworking var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesNetworking(plain) return nil } +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["maskSize"]; !ok || v == nil { - return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") - } - if v, ok := raw["podCidr"]; !ok || v == nil { - return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") - } - type Plain SpecDistributionModulesNetworkingCilium - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesNetworkingCilium(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", - "calico", - "cilium", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil -} +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil -} +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + // The enforcement action to use for the gatekeeper module + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} + // If true, the default policies will be installed + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionModulesNetworking + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworking(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The storage size for the minio pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + 
SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) - } - *j = SpecDistributionModulesLoggingLokiBackend(v) - return nil -} + // If true, the default policies will be installed + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the kyverno module + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in 
SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionModulesPolicyKyverno var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } +type SpecDistributionModulesPolicyType string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *Metadata) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain Metadata var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") - } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngress(plain) - return nil -} +type SpecDistributionModulesLoggingType string -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of security to use, either ***none***, ***gatekeeper*** or + // ***kyverno*** + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil -} +const TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} +type TypesKubeNodeSelector map[string]string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil -} +type SpecDistributionModulesTracingTempoBackend string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") - } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLS(plain) - return nil -} +type TypesKubeTolerationEffect string -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +type TypesKubeTaints []string + +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key id of the external tempo backend + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external tempo backend + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the external tempo backend + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the external tempo backend will not use tls + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the external tempo backend + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) - } - *j = SpecDistributionModulesPolicyType(v) - return nil +type SpecDistributionModulesTracingTempo struct { + // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the tempo pods + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") - } - type Plain SpecDistributionModulesIngressNginxTLSSecret - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) - return nil +type SpecDistributionModulesTracingType string + +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) - } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) - return nil +type TypesIpAddress string + +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +type TypesCidr string + +type TypesFileRef string + +type TypesEnvRef string + +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") - } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManager(plain) - return nil -} +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") - } - type Plain SpecDistributionModulesPolicy - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicy(plain) - return nil + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) - return nil -} +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) - return nil + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", -} +type TypesKubeResourcesLimits struct { + // The cpu limit for the loki pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", + // 
The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) - } - *j = SpecDistributionModulesTracingTempoBackend(v) - return nil -} +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") - } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesDr(plain) - return nil -} + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) - } - *j = SpecDistributionModulesDrVeleroBackend(v) - return nil + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". + Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) - } - *j = SpecDistributionModulesDrType(v) - return nil +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) - } - *j = SpecDistributionModulesTracingType(v) - return nil +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` } type TypesFuryModuleOverridesIngress struct { @@ -2128,682 +2629,202 @@ type TypesFuryModuleOverridesIngress struct { IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeLabels map[string]string + +type TypesKubeLabels_1 map[string]string + type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") - } - type Plain SpecDistributionModulesTracing - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesTracing(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") - } - type Plain SpecDistributionModulesAuthProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProvider(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) - } - *j = SpecDistributionModulesMonitoringType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["kubeconfig"]; !ok || v == nil { - return fmt.Errorf("field kubeconfig in SpecDistribution: required") - } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") - } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistribution(plain) - return nil -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") - } - type Plain SpecPluginsHelmReleasesElemSetElem - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecPluginsHelmReleasesElemSetElem(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthOverridesIngress(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) - return nil -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *Spec) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - type Plain Spec - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) - return nil -} - -type TypesKubeLabels map[string]string - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil -} +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} + // The node selector to use to place the pods for the security module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") - } - type Plain TypesKubeToleration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = TypesKubeToleration(plain) - return nil + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` +const ( + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" +) - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` +type TypesKubeTolerationEffect_1 string - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` +const ( + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" +) - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} +type TypesKubeTolerationOperator string const ( TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" ) -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) - } - *j = TypesKubeTolerationOperator(v) - return nil -} - -const TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - -type TypesKubeTolerationOperator string +type TypesKubeTolerationOperator_1 string -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} +const ( + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") - } - type Plain SpecDistributionModulesAuthPomeriumSecrets - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) - return nil -} +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` -type TypesKubeNodeSelector_1 map[string]string + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` -type TypesKubeTolerationEffect_1 string + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) - } - *j = TypesKubeTolerationEffect_1(v) - return nil -} +type TypesSemVer string -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) +type TypesSshPubKey string -type TypesKubeTolerationOperator_1 string +type TypesTcpPort int -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", -} +type TypesUri string -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) - } - *j = TypesKubeTolerationOperator_1(v) - return nil +var enumValues_KfddistributionKfdV1Alpha2Kind = []interface{}{ + "KFDDistribution", } -const ( - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". 
- Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} - // Value corresponds to the JSON schema field "value". - Value string `json:"value" yaml:"value" mapstructure:"value"` +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") - } - type Plain TypesKubeToleration_1 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = TypesKubeToleration_1(plain) - return nil +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", } -const ( - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") - } - type Plain SpecDistributionModulesAuthPomerium_2 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthPomerium_2(plain) - return nil +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } -type TypesKubeTolerationEffect string +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} -type TypesIpAddress string +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} -type TypesKubeLabels_1 map[string]string +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} -type TypesKubeTaints []string +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} -type TypesSemVer string +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", + "calico", + "cilium", +} -type TypesSshPubKey string +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} -type TypesTcpPort int +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} -type TypesUri string +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", 
+ "gatekeeper", + "kyverno", +} -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") - } - type Plain SpecDistributionCommonProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCommonProvider(plain) - return nil +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } -var enumValues_KfddistributionKfdV1Alpha2Kind = []interface{}{ - "KFDDistribution", +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *KfddistributionKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_KfddistributionKfdV1Alpha2Kind { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_KfddistributionKfdV1Alpha2Kind, v) - } - *j = KfddistributionKfdV1Alpha2Kind(v) - return nil +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } -type TypesKubeNodeSelector map[string]string +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *Metadata) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in Metadata: required") - } - type Plain Metadata - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.Name) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "name", 1) - } - if len(plain.Name) > 56 { - return fmt.Errorf("field %s length: must be <= %d", "name", 56) - } - *j = Metadata(plain) - return nil +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *KfddistributionKfdV1Alpha2) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["apiVersion"]; !ok || v == nil { - return fmt.Errorf("field apiVersion in KfddistributionKfdV1Alpha2: required") - } - if v, ok := raw["kind"]; !ok || v == nil { - return fmt.Errorf("field kind in KfddistributionKfdV1Alpha2: required") - } - if v, ok := raw["metadata"]; !ok || v == nil { - return fmt.Errorf("field metadata in KfddistributionKfdV1Alpha2: required") - } - if v, ok := raw["spec"]; !ok || v == nil { - return fmt.Errorf("field spec in KfddistributionKfdV1Alpha2: required") - } - type Plain KfddistributionKfdV1Alpha2 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = KfddistributionKfdV1Alpha2(plain) - return nil +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", } diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 426125b61..b8288db5a 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -6,6 +6,8 @@ import ( "encoding/json" "fmt" "reflect" + + 
"github.com/sighupio/go-jsonschema/pkg/types" ) type Metadata struct { @@ -901,6 +903,9 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The date loki have to switch to TSDB and schema v13 + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } type SpecDistributionModulesLoggingLokiBackend string @@ -1549,131 +1554,164 @@ type SpecKubernetesAdvancedAnsible struct { PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) - return nil +type SpecKubernetesAdvancedCloud struct { + // Sets cloud config for the Kubelet + Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + + // Sets the cloud provider for the Kubelet + Provider *string `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", +// Advanced configuration for containerd +type SpecKubernetesAdvancedContainerd struct { + // RegistryConfigs corresponds to the JSON schema field 
"registryConfigs". + RegistryConfigs SpecKubernetesAdvancedContainerdRegistryConfigs `json:"registryConfigs,omitempty" yaml:"registryConfigs,omitempty" mapstructure:"registryConfigs,omitempty"` } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +// Allows specifying custom configuration for a registry at containerd level. You +// can set authentication details and mirrors for a registry. +// This feature can be used for example to authenticate to a private registry at +// containerd (container runtime) level, i.e. globally instead of using +// `imagePullSecrets`. It also can be used to use a mirror for a registry or to +// enable insecure connections to trusted registries that have self-signed +// certificates. +type SpecKubernetesAdvancedContainerdRegistryConfigs []struct { + // Set to `true` to skip TLS verification (e.g. when using self-signed + // certificates). + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty" yaml:"insecureSkipVerify,omitempty" mapstructure:"insecureSkipVerify,omitempty"` + + // Array of URLs with the mirrors to use for the registry. Example: + // `["http://mymirror.tld:8080"]` + MirrorEndpoint []string `json:"mirrorEndpoint,omitempty" yaml:"mirrorEndpoint,omitempty" mapstructure:"mirrorEndpoint,omitempty"` + + // The password containerd will use to authenticate against the registry. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // Registry address on which you would like to configure authentication or + // mirror(s). Example: `myregistry.tld:5000` + Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // The username containerd will use to authenticate against the registry. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) - } - *j = SpecDistributionModulesMonitoringType(v) - return nil +type SpecKubernetesAdvancedEncryption struct { + // etcd's encryption at rest configuration. Must be a string with the + // EncryptionConfiguration object in YAML. Example: + // + // ```yaml + // + // apiVersion: apiserver.config.k8s.io/v1 + // kind: EncryptionConfiguration + // resources: + // - resources: + // - secrets + // providers: + // - aescbc: + // keys: + // - name: mykey + // secret: base64_encoded_secret + // ``` + // + Configuration *string `json:"configuration,omitempty" yaml:"configuration,omitempty" mapstructure:"configuration,omitempty"` + + // The TLS cipher suites to use for etcd, kubelet, and kubeadm static pods. + // Example: + // ```yaml + // tlsCipherSuites: + // - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + // - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + // - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + // - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + // - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + // - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + // - "TLS_AES_128_GCM_SHA256" + // - "TLS_AES_256_GCM_SHA384" + // - "TLS_CHACHA20_POLY1305_SHA256" + // ``` + TlsCipherSuites []string `json:"tlsCipherSuites,omitempty" yaml:"tlsCipherSuites,omitempty" mapstructure:"tlsCipherSuites,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil +// OIDC configuration for the Kubernetes API server. +type SpecKubernetesAdvancedOIDC struct { + // The path to the certificate for the CA that signed the identity provider's web + // certificate. Defaults to the host's root CAs. This should be a path available + // to the API Server. + CaFile *string `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` + + // The client ID the API server will use to authenticate to the OIDC provider. + ClientId *string `json:"client_id,omitempty" yaml:"client_id,omitempty" mapstructure:"client_id,omitempty"` + + // Prefix prepended to group claims to prevent clashes with existing names (such + // as system: groups). + GroupPrefix *string `json:"group_prefix,omitempty" yaml:"group_prefix,omitempty" mapstructure:"group_prefix,omitempty"` + + // JWT claim to use as the user's group. + GroupsClaim *string `json:"groups_claim,omitempty" yaml:"groups_claim,omitempty" mapstructure:"groups_claim,omitempty"` + + // The issuer URL of the OIDC provider. + IssuerUrl *string `json:"issuer_url,omitempty" yaml:"issuer_url,omitempty" mapstructure:"issuer_url,omitempty"` + + // JWT claim to use as the user name. The default value is `sub`, which is + // expected to be a unique identifier of the end user. 
+ UsernameClaim *string `json:"username_claim,omitempty" yaml:"username_claim,omitempty" mapstructure:"username_claim,omitempty"` + + // Prefix prepended to username claims to prevent clashes with existing names + // (such as system: users). + UsernamePrefix *string `json:"username_prefix,omitempty" yaml:"username_prefix,omitempty" mapstructure:"username_prefix,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ip"]; !ok || v == nil { - return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecKubernetesLoadBalancersHost + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesLoadBalancersHost(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesAuth(plain) return nil } @@ -1689,6 +1727,8 @@ type TypesFuryModuleOverridesIngress struct { IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + var 
enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ "create", "replace", @@ -1715,8 +1755,6 @@ func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { type TypesCidr string -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - // Override the common configuration with a particular configuration for the // module. type TypesFuryModuleOverrides struct { @@ -1735,76 +1773,81 @@ var enumValues_SpecDistributionModulesDrType = []interface{}{ "on-premises", } -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "calico", - "cilium", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesLoggingType(v) return nil } +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "calico", + "cilium", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesNetworkingType(v) return nil } +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesDrType(v) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
@@ -1825,19 +1868,32 @@ func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil +} + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err @@ -1876,24 +1932,6 @@ func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -1916,6 +1954,26 @@ var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []in "http01", } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -1937,24 +1995,9 @@ func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) - return nil +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", } var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ @@ -1983,30 +2026,25 @@ func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJ } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON 
implements json.Unmarshaler. func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2028,6 +2066,24 @@ func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b return nil } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2050,20 +2106,20 @@ func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } @@ -2099,26 +2155,6 @@ var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ "none", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) - } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) - return nil -} - type TypesKubeResources struct { // Limits corresponds to the JSON schema field "limits". Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` @@ -2135,24 +2171,6 @@ type TypesKubeResourcesRequests struct { Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") - } - type Plain SpecDistributionModulesPolicy - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicy(plain) - return nil -} - type TypesKubeResourcesLimits struct { // The cpu limit for the loki pods Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` @@ -2161,6 +2179,36 @@ type TypesKubeResourcesLimits struct { Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } +// Configuration for HAProxy stats page. Accessible at http://:1936/stats +type SpecKubernetesLoadBalancersStats struct { + // The basic-auth password for HAProxy's stats page. + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The basic-auth username for HAProxy's stats page + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2228,12 +2276,6 @@ func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) err return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string @@ -2259,24 +2301,10 @@ var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ "externalEndpoint", } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } // UnmarshalJSON implements json.Unmarshaler. @@ -2318,9 +2346,45 @@ func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) er return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLoadBalancersStats) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecKubernetesLoadBalancersStats: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesLoadBalancersStats: required") + } + type Plain SpecKubernetesLoadBalancersStats + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesLoadBalancersStats(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -2364,9 +2428,24 @@ func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -2405,23 +2484,22 @@ func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { return nil } -type SpecKubernetesLoadBalancersKeepalived struct { - // Set to install keepalived with a floating virtual IP shared between the load - // balancer hosts for a deployment in High Availability. - Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` - - // Name of the network interface where to bind the Keepalived virtual IP. - Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"` - - // The Virtual floating IP for Keepalived - Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"` - - // The passphrase for the Keepalived clustering. - Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"` - - // The virtual router ID of Keepalived, must be different from other Keepalived - // instances in the same network. - VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -2471,24 +2549,11 @@ func (j *SpecDistribution) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) - } - *j = SpecDistributionModulesMonitoringMimirBackend(v) - return nil +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", } // UnmarshalJSON implements json.Unmarshaler. @@ -2519,23 +2584,22 @@ func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideApt) UnmarshalJSON(b [] } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesAuthProviderType(v) return nil } @@ -2567,168 +2631,99 @@ func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b [] } *j = SpecKubernetesAdvancedAirGapDependenciesOverrideYum(plain) return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil -} - -type SpecKubernetesAdvancedCloud struct { - // Sets cloud config for the Kubelet - Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` - - // Sets the cloud provider for the Kubelet - Provider *string `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` -} - -// Allows specifying custom configuration for a registry at containerd level. You -// can set authentication details and mirrors for a registry. -// This feature can be used for example to authenticate to a private registry at -// containerd (container runtime) level, i.e. globally instead of using -// `imagePullSecrets`. 
It also can be used to use a mirror for a registry or to -// enable insecure connections to trusted registries that have self-signed -// certificates. -type SpecKubernetesAdvancedContainerdRegistryConfigs []struct { - // Set to `true` to skip TLS verification (e.g. when using self-signed - // certificates). - InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty" yaml:"insecureSkipVerify,omitempty" mapstructure:"insecureSkipVerify,omitempty"` - - // Array of URLs with the mirrors to use for the registry. Example: - // `["http://mymirror.tld:8080"]` - MirrorEndpoint []string `json:"mirrorEndpoint,omitempty" yaml:"mirrorEndpoint,omitempty" mapstructure:"mirrorEndpoint,omitempty"` - - // The password containerd will use to authenticate against the registry. - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // Registry address on which you would like to configure authentication or - // mirror(s). Example: `myregistry.tld:5000` - Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - - // The username containerd will use to authenticate against the registry. - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -// Advanced configuration for containerd -type SpecKubernetesAdvancedContainerd struct { - // RegistryConfigs corresponds to the JSON schema field "registryConfigs". - RegistryConfigs SpecKubernetesAdvancedContainerdRegistryConfigs `json:"registryConfigs,omitempty" yaml:"registryConfigs,omitempty" mapstructure:"registryConfigs,omitempty"` -} - -type SpecKubernetesAdvancedEncryption struct { - // etcd's encryption at rest configuration. Must be a string with the - // EncryptionConfiguration object in YAML. 
Example: - // - // ```yaml - // - // apiVersion: apiserver.config.k8s.io/v1 - // kind: EncryptionConfiguration - // resources: - // - resources: - // - secrets - // providers: - // - aescbc: - // keys: - // - name: mykey - // secret: base64_encoded_secret - // ``` - // - Configuration *string `json:"configuration,omitempty" yaml:"configuration,omitempty" mapstructure:"configuration,omitempty"` - - // The TLS cipher suites to use for etcd, kubelet, and kubeadm static pods. - // Example: - // ```yaml - // tlsCipherSuites: - // - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" - // - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" - // - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" - // - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" - // - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" - // - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" - // - "TLS_AES_128_GCM_SHA256" - // - "TLS_AES_256_GCM_SHA384" - // - "TLS_CHACHA20_POLY1305_SHA256" - // ``` - TlsCipherSuites []string `json:"tlsCipherSuites,omitempty" yaml:"tlsCipherSuites,omitempty" mapstructure:"tlsCipherSuites,omitempty"` -} - -// OIDC configuration for the Kubernetes API server. -type SpecKubernetesAdvancedOIDC struct { - // The path to the certificate for the CA that signed the identity provider's web - // certificate. Defaults to the host's root CAs. This should be a path available - // to the API Server. - CaFile *string `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` - - // The client ID the API server will use to authenticate to the OIDC provider. - ClientId *string `json:"client_id,omitempty" yaml:"client_id,omitempty" mapstructure:"client_id,omitempty"` - - // Prefix prepended to group claims to prevent clashes with existing names (such - // as system: groups). - GroupPrefix *string `json:"group_prefix,omitempty" yaml:"group_prefix,omitempty" mapstructure:"group_prefix,omitempty"` - - // JWT claim to use as the user's group. 
- GroupsClaim *string `json:"groups_claim,omitempty" yaml:"groups_claim,omitempty" mapstructure:"groups_claim,omitempty"` - - // The issuer URL of the OIDC provider. - IssuerUrl *string `json:"issuer_url,omitempty" yaml:"issuer_url,omitempty" mapstructure:"issuer_url,omitempty"` - - // JWT claim to use as the user name. The default value is `sub`, which is - // expected to be a unique identifier of the end user. - UsernameClaim *string `json:"username_claim,omitempty" yaml:"username_claim,omitempty" mapstructure:"username_claim,omitempty"` +} - // Prefix prepended to username claims to prevent clashes with existing names - // (such as system: users). - UsernamePrefix *string `json:"username_prefix,omitempty" yaml:"username_prefix,omitempty" mapstructure:"username_prefix,omitempty"` +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } -type SpecKubernetesAdvancedUsers struct { - // List of user names to create and get a kubeconfig file. Users will not have any - // permissions by default, RBAC setup for the new users is needed. - Names []string `json:"names,omitempty" yaml:"names,omitempty" mapstructure:"names,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} - // The organization the users belong to. 
- Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil } -type TypesFuryModuleComponentOverrides struct { - // Set to override the node selector used to place the pods of the package. - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} - // Set to override the tolerations that will be added to the pods of the package. - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + } + *j = SpecDistributionModulesMonitoringMimirBackend(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -2749,88 +2744,134 @@ func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) return nil } -type SpecKubernetesLoadBalancersHost struct { - // The IP address of the host. - Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` +type SpecKubernetesAdvancedUsers struct { + // List of user names to create and get a kubeconfig file. Users will not have any + // permissions by default, RBAC setup for the new users is needed. + Names []string `json:"names,omitempty" yaml:"names,omitempty" mapstructure:"names,omitempty"` - // A name to identify the host. This value will be concatenated to - // `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as - // `.`. 
- Name string `json:"name" yaml:"name" mapstructure:"name"` + // The organization the users belong to. + Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesAuth + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecKubernetesLoadBalancersHost struct { + // The IP address of the host. + Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` + + // A name to identify the host. This value will be concatenated to + // `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as + // `.`. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["ip"]; !ok || v == nil { + return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecKubernetesLoadBalancersHost var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecKubernetesLoadBalancersHost(plain) return nil } -type TypesFileRef string +type SpecKubernetesLoadBalancersKeepalived struct { + // Set to install keepalived with a floating virtual IP shared between the load + // balancer hosts for a deployment in High Availability. + Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` -// Configuration for HAProxy stats page. Accessible at http://:1936/stats -type SpecKubernetesLoadBalancersStats struct { - // The basic-auth password for HAProxy's stats page. - Password string `json:"password" yaml:"password" mapstructure:"password"` + // Name of the network interface where to bind the Keepalived virtual IP. 
+ Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"` - // The basic-auth username for HAProxy's stats page - Username string `json:"username" yaml:"username" mapstructure:"username"` + // The Virtual floating IP for Keepalived + Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"` + + // The passphrase for the Keepalived clustering. + Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"` + + // The virtual router ID of Keepalived, must be different from other Keepalived + // instances in the same network. + VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLoadBalancersStats) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecKubernetesLoadBalancersStats: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesLoadBalancersStats: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecKubernetesLoadBalancersStats + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesLoadBalancersStats(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + type SpecKubernetesLoadBalancers struct { // Additional configuration to append to HAProxy's configuration file. AdditionalConfig *string `json:"additionalConfig,omitempty" yaml:"additionalConfig,omitempty" mapstructure:"additionalConfig,omitempty"` @@ -3343,22 +3384,20 @@ const ( ) // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + type Plain SpecKubernetesLoadBalancersKeepalived + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTolerationOperator(v) + *j = SpecKubernetesLoadBalancersKeepalived(plain) return nil } @@ -3564,23 +3603,7 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { type TypesEnvRef string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enabled"]; !ok || v == nil { - return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required") - } - type Plain SpecKubernetesLoadBalancersKeepalived - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesLoadBalancersKeepalived(plain) - return nil -} +type TypesFileRef string type TypesIpAddress string diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 542c53496..f931e45a6 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1081,6 +1081,20 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, { "if": { "properties": { @@ -1192,34 +1206,18 @@ } } }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "The date loki have to switch to TSDB and schema v13" + }, "resources": { "$ref": "#/$defs/Types.KubeResources" - }, - "tsdbSchemav13Migration": { - "type": "object", - "additionalProperties": false, - "properties": { - "enabled": { - "type": "boolean", - "description": "A flag that enables migration of existing clusters towards TSDB and schema v13" - }, - "schemaConfig": { - "type": "object", - "additionalProperties": false, - "properties": { - "tsdbStartDate": { - "type": "string", - "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" - } - } - } - }, - "required": [ - "enabled" - ] } - } + }, + "required": [ + "tsdbStartDate" + ] }, "Spec.Distribution.Modules.Logging.Minio": { "type": "object", From 91110773066fe2b0155ad11f35b7d5f6ac25d411 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 18 Nov 2024 17:47:39 +0100 Subject: 
[PATCH 068/160] feat(policy): update to opa v1.13.0 Update kfd.yaml to point to OPA v1.13.0. No other changes are necessary --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 48cceb0db..48f9bd0a5 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -10,7 +10,7 @@ modules: ingress: v2.3.3 logging: v3.4.1 monitoring: v3.2.0 - opa: v1.12.0 + opa: v1.13.0 networking: v1.17.0 tracing: v1.1.0 kubernetes: From e7a846446006c8549a6657987f9982f9c60f8707 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 18 Nov 2024 17:58:59 +0100 Subject: [PATCH 069/160] chore(kfd.yaml): maintain order in modules list --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index ae844d785..738499b73 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -9,8 +9,8 @@ modules: dr: v2.3.0 ingress: v2.3.3 logging: v3.4.1 - opa: v1.13.0 monitoring: v3.3.0-rc.1 + opa: v1.13.0 networking: v1.17.0 tracing: v1.1.0 kubernetes: From 5edecff50326625cf216fa32d253bd46d0a20a4c Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Mon, 18 Nov 2024 18:06:02 +0100 Subject: [PATCH 070/160] feat(docs): add release docs for v1.30.0 --- docs/releases/v1.30.0.md | 61 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 docs/releases/v1.30.0.md diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md new file mode 100644 index 000000000..8393a03f2 --- /dev/null +++ b/docs/releases/v1.30.0.md @@ -0,0 +1,61 @@ +# Kubernetes Fury Distribution Release v1.30.0 + +Welcome to KFD release `v1.30.0`. + +The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.io/) it is battle tested in production environments. 
+
+## New Features since `v1.29.4`
+
+### Installer Updates
+
+- [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) 📦 installer: [**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6)
+  - TBD
+- [eks](https://github.com/sighupio/fury-eks-installer) 📦 installer: [**v3.X.X**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.X.X)
+  - TBD
+
+### Module updates
+
+- [networking](https://github.com/sighupio/fury-kubernetes-networking) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/vX.X.X)
+  - TBD
+- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/vX.X.X)
+  - TBD
+- [logging](https://github.com/sighupio/fury-kubernetes-logging) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/vX.X.X)
+  - TBD
+- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/vX.X.X)
+  - TBD
+- [auth](https://github.com/sighupio/fury-kubernetes-auth) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/vX.X.X)
+  - TBD
+- [dr](https://github.com/sighupio/fury-kubernetes-dr) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/vX.X.X)
+  - TBD
+- [tracing](https://github.com/sighupio/fury-kubernetes-tracing) 📦 core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/vX.X.X)
+  - TBD
+- [aws](https://github.com/sighupio/fury-kubernetes-aws) 📦 module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/vX.X.X)
+  - TBD
+
+## New features 🌟
+
+- **New option for Logging**: The Loki configuration has been extended to accommodate the new `tsdbStartDate` option to allow a
migration towards TSDB and schema v13 (note: **this is a breaking change**): + ```yaml + ... + loki: + tsdbStartDate: "2024-11-18" + ... + ``` + - `tsdbStartDate` (**required**): configures details for the schema config for the purpose of the migration + + `tsdbStartDate` should be a string in `ISO 8601` date format and it represents the day starting from which Loki will record logs with the new store and schema. + + โ„น๏ธ **Note**: Loki will assume the start of the day on the UTC midnight of the specified day. + +## Fixes ๐Ÿž + +- **TBD**: TBD. + + +## Breaking changes ๐Ÿ’” + +- **Loki store and schema change:** A new store and schema has been introduced in order to improve efficiency, speed and scalability of Loki clusters. + +## Upgrade procedure + +Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. From 66252dd87b094e85c281cbba1f8de5054f49576c Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 18 Nov 2024 19:33:40 +0100 Subject: [PATCH 071/160] feat(ingress): use ingress v3.0.1-rc.0 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 576f2bd1c..870ab5912 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -7,7 +7,7 @@ modules: auth: v0.3.0 aws: v4.2.1 dr: v2.3.0 - ingress: v2.3.3 + ingress: v3.0.1-rc.0 logging: v3.4.1 monitoring: v3.2.0 opa: v1.12.0 From cd3c3c2f0067e029e9770d6ab0733e879c45db33 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 18 Nov 2024 19:35:47 +0100 Subject: [PATCH 072/160] fix(monitoring/ingress): fix minio-monitoring ingress definition Fix template for minio-monitoring ingress that was pointing to minio-tracing service instead of the monitoring one. 
--- .../manifests/monitoring/resources/ingress-infra.yml.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl index 3d4f1fbe2..ef87719f5 100644 --- a/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl +++ b/templates/distribution/manifests/monitoring/resources/ingress-infra.yml.tpl @@ -177,7 +177,7 @@ spec: number: 80 {{ else }} service: - name: minio-tracing-console + name: minio-monitoring-console port: name: http {{ end }} From d56798cb531b72ba597bc27a7ee77ac2d0998ee9 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 18 Nov 2024 19:52:00 +0100 Subject: [PATCH 073/160] fix(migrations): make auth type from none to sso work Migration from auth type none to sso was broken because the infra ingress existing on their relative namespaces were not being deleted before trying to create the new ones in the `pomerium` namespace and the ingerss controller validating admission webhook (rightfuly) denied the request. Fixes #309 --- templates/distribution/scripts/pre-apply.sh.tpl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/templates/distribution/scripts/pre-apply.sh.tpl b/templates/distribution/scripts/pre-apply.sh.tpl index 409576e56..3971a88f5 100644 --- a/templates/distribution/scripts/pre-apply.sh.tpl +++ b/templates/distribution/scripts/pre-apply.sh.tpl @@ -700,6 +700,13 @@ echo "Finished clean up tasks for migrating Auth type from SSO to basicAuth." {{- end }} {{- end }} +{{- if eq .reducers.distributionModulesAuthProviderType.from "none" }} + {{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }} + # we need to delete infra ingresses that are present on each namespace before switching to sso, because they will be recreated in the pomerium namespace. 
+ deleteInfraIngresses + {{- end }} +{{- end }} + {{- if eq .reducers.distributionModulesAuthProviderType.from "basicAuth" }} {{- if eq .reducers.distributionModulesAuthProviderType.to "sso" }} echo "Running clean up tasks for migrating Auth type from basicAuth to SSO..." From 64416f732da154f5d4ea3344f0f3e516bdd930ed Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Tue, 19 Nov 2024 11:28:25 +0100 Subject: [PATCH 074/160] chore(docs): better explanation of the meaning of tsdbStartDate for loki package --- pkg/apis/ekscluster/v1alpha2/private/schema.go | 11 ++++++++++- pkg/apis/ekscluster/v1alpha2/public/schema.go | 11 ++++++++++- pkg/apis/kfddistribution/v1alpha2/public/schema.go | 11 ++++++++++- pkg/apis/onpremises/v1alpha2/public/schema.go | 11 ++++++++++- schemas/private/ekscluster-kfd-v1alpha2.json | 2 +- schemas/public/ekscluster-kfd-v1alpha2.json | 2 +- schemas/public/kfddistribution-kfd-v1alpha2.json | 2 +- schemas/public/onpremises-kfd-v1alpha2.json | 2 +- 8 files changed, 44 insertions(+), 8 deletions(-) diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 366a89f5b..53a7adb84 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -887,7 +887,16 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The date loki have to switch to TSDB and schema v13 + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. 
The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 76248e752..b5b962610 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -847,7 +847,16 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The date loki have to switch to TSDB and schema v13 + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 0cb617b8c..afcee2307 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -1551,7 +1551,16 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". 
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The date loki have to switch to TSDB and schema v13 + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index b8288db5a..923e7ad37 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -904,7 +904,16 @@ type SpecDistributionModulesLoggingLoki struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The date loki have to switch to TSDB and schema v13 + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. 
TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` } diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index f931e45a6..e5763327a 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1209,7 +1209,7 @@ "tsdbStartDate": { "type": "string", "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." }, "resources": { "$ref": "#/$defs/Types.KubeResources" diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 00881525e..848a2956e 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1598,7 +1598,7 @@ "tsdbStartDate": { "type": "string", "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." 
}, "resources": { "$ref": "#/$defs/Types.KubeResources" diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 69de3c38a..5a819a894 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -615,7 +615,7 @@ "tsdbStartDate": { "type": "string", "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." }, "resources": { "$ref": "#/$defs/Types.KubeResources" diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 5ed4b6ada..884223bf9 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1199,7 +1199,7 @@ "tsdbStartDate": { "type": "string", "format": "date", - "description": "The date loki have to switch to TSDB and schema v13" + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." 
}, "resources": { "$ref": "#/$defs/Types.KubeResources" From 753a6414c9909adc1151f5face90f9a0e04a9d38 Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Tue, 19 Nov 2024 11:30:19 +0100 Subject: [PATCH 075/160] fix(defaults): remove no more necessary defaults for loki --- defaults/ekscluster-kfd-v1alpha2.yaml | 2 -- defaults/kfddistribution-kfd-v1alpha2.yaml | 2 -- defaults/onpremises-kfd-v1alpha2.yaml | 2 -- 3 files changed, 6 deletions(-) diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 6a65fe780..6c708be00 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -113,8 +113,6 @@ data: secretAccessKey: example accessKeyId: example bucketName: lokibucket - tsdbSchemav13Migration: - enabled: false customOutputs: {} # monitoring module configuration monitoring: diff --git a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index 75ae054d3..d0c790257 100644 --- a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -106,8 +106,6 @@ data: secretAccessKey: example accessKeyId: example bucketName: lokibucket - tsdbSchemav13Migration: - enabled: false customOutputs: {} # monitoring module configuration monitoring: diff --git a/defaults/onpremises-kfd-v1alpha2.yaml b/defaults/onpremises-kfd-v1alpha2.yaml index f6b628cd8..f26ad1e6e 100644 --- a/defaults/onpremises-kfd-v1alpha2.yaml +++ b/defaults/onpremises-kfd-v1alpha2.yaml @@ -106,8 +106,6 @@ data: secretAccessKey: example accessKeyId: example bucketName: lokibucket - tsdbSchemav13Migration: - enabled: false customOutputs: {} # monitoring module configuration monitoring: From 1658242a104356b2e31edf7cabec26b38577ebee Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Tue, 19 Nov 2024 11:32:24 +0100 Subject: [PATCH 076/160] chore(docs): linting release v1.3.0 --- docs/releases/v1.30.0.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index 8393a03f2..1557d0e0f 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -35,12 +35,14 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i ## New features ๐ŸŒŸ - **New option for Logging**: The Loki configuration has been extended to accommodate the new `tsdbStartDate` option to allow a migration towards TSDB and schema v13 (note: **this is a breaking change**): + ```yaml ... loki: tsdbStartDate: "2024-11-18" ... ``` + - `tsdbStartDate` (**required**): configures details for the schema config for the purpose of the migration `tsdbStartDate` should be a string in `ISO 8601` date format and it represents the day starting from which Loki will record logs with the new store and schema. @@ -51,7 +53,6 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i - **TBD**: TBD. - ## Breaking changes ๐Ÿ’” - **Loki store and schema change:** A new store and schema has been introduced in order to improve efficiency, speed and scalability of Loki clusters. 
From 861b62c32eee404b47f20aeb66526278a0ff7454 Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Tue, 19 Nov 2024 12:06:47 +0100 Subject: [PATCH 077/160] chore(docs): update docs with latest changes --- docs/schemas/ekscluster-kfd-v1alpha2.md | 6 +++++- docs/schemas/kfddistribution-kfd-v1alpha2.md | 6 +++++- docs/schemas/onpremises-kfd-v1alpha2.md | 6 +++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 6cc68ce46..f77b07d2b 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -2453,7 +2453,11 @@ The memory request for the opensearch pods ### Description -The date loki have to switch to TSDB and schema v13 +Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs. + +The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes. + +Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`. ## .spec.distribution.modules.logging.minio diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 919eb6d5a..f392254fa 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -1925,7 +1925,11 @@ The memory request for the opensearch pods ### Description -The date loki have to switch to TSDB and schema v13 +Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs. 
+ +The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes. + +Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`. ## .spec.distribution.modules.logging.minio diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index 886fe3262..e3626d219 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -2147,7 +2147,11 @@ The memory request for the prometheus pods ### Description -The date loki have to switch to TSDB and schema v13 +Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs. + +The value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes. + +Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`. 
## .spec.distribution.modules.logging.minio From e8d81d60f52bf731092d9dec02745e7847c89aea Mon Sep 17 00:00:00 2001 From: Alessio Dionisi Date: Tue, 19 Nov 2024 15:37:13 +0100 Subject: [PATCH 078/160] deps: update go to 1.23 and golangci-lint to 1.62 --- .tool-versions | 4 ++-- go.mod | 7 ++++--- go.sum | 10 ++++++---- pkg/apis/config/validation_test.go | 2 -- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.tool-versions b/.tool-versions index fdaa990a2..65e31e4a8 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,7 +1,7 @@ bats 1.9.0 drone 1.7.0 -golang 1.21.5 -golangci-lint 1.55.2 +golang 1.23.3 +golangci-lint 1.62.0 yq 4.33.3 jq 1.6 make 4.4.1 diff --git a/go.mod b/go.mod index 121af67c6..2ef42ff82 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,12 @@ module github.com/sighupio/fury-distribution -go 1.21 +go 1.23 require ( github.com/Al-Pragliola/go-version v1.6.2 github.com/go-playground/validator/v10 v10.15.5 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d + github.com/sighupio/go-jsonschema v0.15.2 + golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc ) require ( @@ -15,6 +16,6 @@ require ( github.com/leodido/go-urn v1.2.4 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect ) diff --git a/go.sum b/go.sum index 6e7eb35e8..af7697764 100644 --- a/go.sum +++ b/go.sum @@ -17,6 +17,8 @@ github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sighupio/go-jsonschema v0.15.2 h1:Yt+QeiIwL9LZpYH+LwqiDD08FG8vjoyngrpHmfqPmmE= +github.com/sighupio/go-jsonschema v0.15.2/go.mod 
h1:3KaIPMGHZhUcDq2b+6rEZgkpT5mpstnsu+KnSbuf/R4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -26,12 +28,12 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/apis/config/validation_test.go b/pkg/apis/config/validation_test.go index 8189ff0ab..bac977fef 100644 --- a/pkg/apis/config/validation_test.go +++ 
b/pkg/apis/config/validation_test.go @@ -32,8 +32,6 @@ func TestValidateAwsRegion(t *testing.T) { }, } for _, tC := range testCases { - tC := tC - t.Run(tC.desc, func(t *testing.T) { t.Parallel() From 5b6e81bc71f52af618f4fc733dffb77ac64729fd Mon Sep 17 00:00:00 2001 From: Alessio Dionisi Date: Tue, 19 Nov 2024 15:40:12 +0100 Subject: [PATCH 079/160] deps: use golang 1.23.2 on CI --- .drone.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.drone.yml b/.drone.yml index b4de8c7e1..856500112 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,13 +15,13 @@ clone: steps: - name: license-check - image: quay.io/sighup/golang:1.21.5 + image: quay.io/sighup/golang:1.23.2 pull: always commands: - make license-check - name: schema-check - image: quay.io/sighup/golang:1.21.5 + image: quay.io/sighup/golang:1.23.2 pull: always commands: - |- @@ -50,7 +50,7 @@ steps: - schema-check - name: lint-go - image: quay.io/sighup/golang:1.21.5 + image: quay.io/sighup/golang:1.23.2 pull: always commands: - make lint-go From 6b02b55f8ad7151b67f873fdc7b9182e958c7220 Mon Sep 17 00:00:00 2001 From: Alessio Dionisi Date: Tue, 19 Nov 2024 15:52:26 +0100 Subject: [PATCH 080/160] deps: update all go tools to align with the one used in the CI --- Makefile | 6 +- schemas/private/ekscluster-kfd-v1alpha2.json | 3804 +++++++++--------- 2 files changed, 1905 insertions(+), 1905 deletions(-) diff --git a/Makefile b/Makefile index 51ad403b4..209b4f6b8 100644 --- a/Makefile +++ b/Makefile @@ -52,10 +52,10 @@ lint-go: .PHONY: tools-go tools-go: - @go install github.com/evanphx/json-patch/cmd/json-patch@v5.6.0 + @go install github.com/evanphx/json-patch/v5/cmd/json-patch@v5.9.0 @go install github.com/google/addlicense@v1.1.1 - @go install mvdan.cc/gofumpt@v0.5.0 - @go install golang.org/x/tools/cmd/goimports@v0.9.3 + @go install mvdan.cc/gofumpt@v0.7.0 + @go install golang.org/x/tools/cmd/goimports@v0.26.0 @go install github.com/daixiang0/gci@v0.10.1 @go install 
github.com/momaek/formattag@v0.0.9 @go install github.com/santhosh-tekuri/jsonschema/cmd/jv@v0.4.0 diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index e5763327a..7c791f03f 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1,4 +1,32 @@ { + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "pattern": "^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$" + }, + "kind": { + "type": "string", + "enum": [ + "EKSCluster" + ] + }, + "metadata": { + "$ref": "#/$defs/Metadata" + }, + "spec": { + "$ref": "#/$defs/Spec" + } + }, + "additionalProperties": false, + "required": [ + "apiVersion", + "kind", + "metadata", + "spec" + ], "$defs": { "Metadata": { "type": "object", @@ -100,251 +128,148 @@ } } }, - "Spec.Distribution": { + "Spec.ToolsConfiguration": { "type": "object", "additionalProperties": false, "properties": { - "common": { - "$ref": "#/$defs/Spec.Distribution.Common" - }, - "modules": { - "$ref": "#/$defs/Spec.Distribution.Modules" - }, - "customPatches": { - "$ref": "../public/spec-distribution-custompatches.json" + "terraform": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform" } }, "required": [ - "modules" - ], - "if": { - "allOf": [ - { - "required": [ - "common" - ] - }, - { - "properties": { - "common": { - "required": [ - "provider" - ] - } - } - }, - { - "properties": { - "common": { - "properties": { - "provider": { - "required": [ - "type" - ] - } - } - } - } - }, - { - "properties": { - "common": { - "properties": { - "provider": { - "properties": { - "type": { - "const": "eks" - } - } - } - } - } - } - } - ] - }, - "then": { - "properties": { - "modules": { - "required": [ - "aws" - ] - } - } - }, - "else": { - "properties": { - "modules": { - "properties": { - "aws": { 
- "type": "null" - } - } - } - } - } + "terraform" + ] }, - "Spec.Distribution.Common": { + "Spec.ToolsConfiguration.Terraform": { "type": "object", "additionalProperties": false, "properties": { - "nodeSelector": { - "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" - }, - "tolerations": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.KubeToleration" - }, - "description": "The tolerations that will be added to the pods for all the KFD modules" - }, - "provider": { - "$ref": "#/$defs/Spec.Distribution.Common.Provider" - }, - "relativeVendorPath": { - "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" - }, - "registry": { - "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." 
+ "state": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State" } - } + }, + "required": [ + "state" + ] }, - "Spec.Distribution.Common.Provider": { + "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, "properties": { - "type": { - "type": "string", - "description": "The type of the provider, must be EKS if specified" + "s3": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" } }, "required": [ - "type" + "s3" ] }, - "Spec.Distribution.Modules": { + "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, "properties": { - "auth": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth" - }, - "aws": { - "$ref": "#/$defs/Spec.Distribution.Modules.Aws" - }, - "dr": { - "$ref": "#/$defs/Spec.Distribution.Modules.Dr" - }, - "ingress": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress" - }, - "logging": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging" - }, - "monitoring": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring" + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "description": "This value defines which bucket will be used to store all the states" }, - "tracing": { - "$ref": "#/$defs/Spec.Distribution.Modules.Tracing" + "keyPrefix": { + "$ref": "#/$defs/Types.AwsS3KeyPrefix", + "description": "This value defines which folder will be used to store all the states inside the bucket" }, - "networking": { - "$ref": "#/$defs/Spec.Distribution.Modules.Networking" + "region": { + "$ref": "#/$defs/Types.AwsRegion", + "description": "This value defines in which region the bucket is located" }, - "policy": { - "$ref": "#/$defs/Spec.Distribution.Modules.Policy" + "skipRegionValidation": { + "type": "boolean", + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" } }, "required": [ - "dr", - "ingress", - "logging", - "policy" + 
"bucketName", + "keyPrefix", + "region" ] }, - "Spec.Distribution.Modules.Auth": { + "Spec.Infrastructure": { "type": "object", "additionalProperties": false, "properties": { - "overrides": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" - }, - "provider": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider" - }, - "baseDomain": { - "type": "string", - "description": "The base domain for the auth module" - }, - "pomerium": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" + "vpc": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc", + "description": "This key defines the VPC that will be created in AWS" }, - "dex": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex" + "vpn": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn", + "description": "This section defines the creation of VPN bastions" } }, - "required": [ - "provider" - ], "allOf": [ { "if": { - "properties": { - "provider": { + "allOf": [ + { "properties": { - "type": { - "const": "sso" + "vpc": { + "type": "null" + } + } + }, + { + "not": { + "properties": { + "vpn": { + "type": "null" + } } } } - } - }, - "then": { - "required": [ - "dex", - "pomerium", - "baseDomain" ] }, - "else": { + "then": { "properties": { - "dex": { - "type": "null" - }, - "pomerium": { - "type": "null" + "vpn": { + "required": [ + "vpcId" + ] } } } }, { "if": { - "properties": { - "provider": { - "properties": { - "type": { - "const": "basicAuth" + "allOf": [ + { + "not": { + "properties": { + "vpc": { + "type": "null" + } + } + } + }, + { + "not": { + "properties": { + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } + } + } } } } - } + ] }, "then": { "properties": { - "provider": { - "required": [ - "basicAuth" - ] - } - } - }, - "else": { - "properties": { - "provider": { - "basicAuth": { - "type": "null" + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } } } } @@ -352,766 +277,950 @@ } ] }, - "Spec.Distribution.Modules.Auth.Dex": { + "Spec.Infrastructure.Vpc": { "type": 
"object", "additionalProperties": false, "properties": { - "connectors": { - "type": "array", - "description": "The connectors for dex" - }, - "additionalStaticClients": { - "type": "array", - "description": "The additional static clients for dex" - }, - "expiry": { - "type": "object", - "additionalProperties": false, - "properties": { - "signingKeys": { - "type": "string", - "description": "Dex signing key expiration time duration (default 6h)." - }, - "idTokens": { - "type": "string", - "description": "Dex ID tokens expiration time duration (default 24h)." - } - } - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "network": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network" } }, "required": [ - "connectors" + "network" ] }, - "Spec.Distribution.Modules.Auth.Overrides": { + "Spec.Infrastructure.Vpc.Network": { "type": "object", "additionalProperties": false, "properties": { - "nodeSelector": { - "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" - }, - "tolerations": { - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/$defs/Types.KubeToleration" - }, - "description": "The tolerations that will be added to the pods for the auth module" + "cidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "This is the CIDR of the VPC that will be created" }, - "ingresses": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" - } + "subnetsCidrs": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" } - } + }, + "required": [ + "cidr", + "subnetsCidrs" + ] }, - "Spec.Distribution.Modules.Auth.Overrides.Ingress": { + "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", "additionalProperties": false, "properties": { - "host": { - "type": "string", - "description": "The host of the ingress" + "private": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + 
}, + "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, - "ingressClass": { - "type": "string", - "description": "The ingress class of the ingress" + "public": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ - "host", - "ingressClass" + "private", + "public" ] }, - "Spec.Distribution.Modules.Auth.Pomerium": { - "$ref": "../public/spec-distribution-modules-auth-pomerium.json" - }, - "Spec.Distribution.Modules.Auth.Provider": { + "Spec.Infrastructure.Vpn": { "type": "object", "additionalProperties": false, "properties": { - "type": { + "instances": { + "type": "integer", + "description": "The number of instances to create, 0 to skip the creation" + }, + "port": { + "$ref": "#/$defs/Types.TcpPort", + "description": "The port used by the OpenVPN server" + }, + "instanceType": { "type": "string", - "enum": [ - "none", - "basicAuth", - "sso" - ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The size of the AWS EC2 instance" }, - "basicAuth": { - "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" + "diskSize": { + "type": "integer", + "description": "The size of the disk in GB" + }, + "operatorName": { + "type": "string", + "description": "The username of the account to create in the bastion's operating system" + }, + "dhParamsBits": { + "type": "integer", + "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + }, + "vpnClientsSubnetCidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + }, + "ssh": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" + }, + "vpcId": { + 
"$ref": "#/$defs/Types.AwsVpcId", + "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + }, + "bucketNamePrefix": { + "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", + "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + }, + "iamUserNameOverride": { + "$ref": "#/$defs/Types.AwsIamRoleName", + "description": "Overrides the default IAM user name for the VPN" } }, "required": [ - "type" + "ssh", + "vpnClientsSubnetCidr" ] }, - "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { + "Spec.Infrastructure.Vpn.Ssh": { "type": "object", "additionalProperties": false, "properties": { - "username": { - "type": "string", - "description": "The username for the basic auth" + "publicKeys": { + "type": "array", + "items": { + "anyOf": [ + { + "$ref": "#/$defs/Types.SshPubKey" + }, + { + "$ref": "#/$defs/Types.FileRef" + } + ] + }, + "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" }, - "password": { - "type": "string", - "description": "The password for the basic auth" + "githubUsersName": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + }, + "allowedFromCidrs": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "The CIDR enabled in the security group that can access the bastions in SSH" } }, "required": [ - "username", - "password" + "allowedFromCidrs", + "githubUsersName" ] }, - "Spec.Distribution.Modules.Aws": { + "Spec.Kubernetes": { "type": "object", "additionalProperties": false, "properties": { - "clusterAutoscaler": { - "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler" + "vpcId": { + "$ref": 
"#/$defs/Types.AwsVpcId", + "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" }, - "ebsCsiDriver": { - "type": "object", - "additionalProperties": false, - "properties": { - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" - } + "clusterIAMRoleNamePrefixOverride": { + "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", + "description": "Overrides the default IAM role name prefix for the EKS cluster" + }, + "workersIAMRoleNamePrefixOverride": { + "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", + "description": "Overrides the default IAM role name prefix for the EKS workers" + }, + "subnetIds": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsSubnetId" }, - "required": [ - "iamRoleArn" - ] + "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" }, - "loadBalancerController": { - "type": "object", - "additionalProperties": false, - "properties": { - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "apiServer": { + "$ref": "#/$defs/Spec.Kubernetes.APIServer" + }, + "serviceIpV4Cidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + }, + "nodeAllowedSshPublicKey": { + "anyOf": [ + { + "$ref": "#/$defs/Types.AwsSshPubKey" }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + { + "$ref": "#/$defs/Types.FileRef" } + ], + "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + }, + "nodePoolsLaunchKind": { + "type": "string", + "enum": [ + "launch_configurations", + "launch_templates", + "both" + ], + "description": "Either `launch_configurations`, `launch_templates` or `both`. 
For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + }, + "logRetentionDays": { + "type": "integer", + "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + }, + "logsTypes": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler" + ] }, - "required": [ - "iamRoleArn" - ] + "minItems": 0, + "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types." }, - "ebsSnapshotController": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } + "nodePools": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool" } }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" + "awsAuth": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth" } }, "required": [ - "clusterAutoscaler", - "ebsCsiDriver", - "loadBalancerController", - "overrides" + "apiServer", + "nodeAllowedSshPublicKey", + "nodePools", + "nodePoolsLaunchKind" ] }, - "Spec.Distribution.Modules.Aws.ClusterAutoscaler": { + "Spec.Kubernetes.APIServer": { "type": "object", "additionalProperties": false, "properties": { - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "privateAccess": { + "type": "boolean", + "description": "This value defines if the API server will be accessible only from the private subnets" }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + "privateAccessCidrs": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 0, + "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + }, + "publicAccessCidrs": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 0, 
+ "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + }, + "publicAccess": { + "type": "boolean", + "description": "This value defines if the API server will be accessible from the public subnets" } }, "required": [ - "iamRoleArn" + "privateAccess", + "publicAccess" ] }, - "Spec.Distribution.Modules.Dr": { + "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" - }, "type": { "type": "string", "enum": [ - "none", - "eks" + "eks-managed", + "self-managed" + ] + }, + "name": { + "type": "string", + "description": "The name of the node pool" + }, + "ami": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" + }, + "containerRuntime": { + "type": "string", + "enum": [ + "docker", + "containerd" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The container runtime to use for the nodes" }, - "velero": { - "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" + "size": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" + }, + "instance": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance" + }, + "attachedTargetGroups": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsArn" + }, + "description": "This optional array defines additional target groups to attach to the instances in the node pool" + }, + "labels": { + "$ref": "#/$defs/Types.KubeLabels", + "description": "Kubernetes labels that will be added to the nodes" + }, + "taints": { + "$ref": "#/$defs/Types.KubeTaints", + "description": "Kubernetes taints that will be added to the nodes" + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "AWS tags that will be added to the ASG and EC2 instances" + }, + "subnetIds": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsSubnetId" + }, + "description": "This value defines the subnet IDs where the nodes will be created" + }, 
+ "additionalFirewallRules": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" } }, "required": [ - "type" - ], - "if": { - "properties": { - "type": { - "const": "eks" - } - } - }, - "then": { - "required": [ - "type", - "velero" - ] - } + "instance", + "name", + "size" + ] }, - "Spec.Distribution.Modules.Dr.Velero": { + "Spec.Kubernetes.NodePool.Ami": { "type": "object", "additionalProperties": false, "properties": { - "schedules": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for Velero's backup schedules.", - "properties": { - "install": { - "type": "boolean", - "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." - }, - "cron": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for Velero's schedules cron.", - "properties": { - "manifests": { - "type": "string", - "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." - }, - "full": { - "type": "string", - "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." - } - } - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." 
- } - } - }, - "eks": { - "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks" + "id": { + "type": "string", + "description": "The AMI ID to use for the nodes" }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "owner": { + "type": "string", + "description": "The owner of the AMI" } }, "required": [ - "eks" + "id", + "owner" ] }, - "Spec.Distribution.Modules.Dr.Velero.Eks": { + "Spec.Kubernetes.NodePool.Instance": { + "type": "object", "additionalProperties": false, "properties": { - "bucketName": { - "$ref": "#/$defs/Types.AwsS3BucketName", - "maxLength": 49, - "description": "The name of the velero bucket" + "type": { + "type": "string", + "description": "The instance type to use for the nodes" }, - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "spot": { + "type": "boolean", + "description": "If true, the nodes will be created as spot instances" }, - "region": { - "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" - } - }, - "required": [ - "iamRoleArn", - "region", - "bucketName" - ], - "type": "object" - }, - "Spec.Distribution.Modules.Ingress": { - "additionalProperties": false, - "allOf": [ - { - "if": { - "properties": { - "nginx": { - "properties": { - "type": { - "const": "dual" - } - } - } - } - }, - "then": { - "required": [ - "dns" - ], - "properties": { - "dns": { - "required": [ - "public", - "private" - ] - } - } - } - }, - { - "if": { - "properties": { - "nginx": { - "properties": { - "type": { - "const": "single" - } - } - } - } - }, - "then": { - "required": [ - "dns" - ], - "properties": { - "dns": { - "required": [ - "public" - ] - } - } - } + "volumeSize": { + "type": "integer", + "description": "The size of the disk in GB" }, - { - "if": { - "properties": { - "nginx": { - "properties": { - "tls": { - "properties": { - "provider": { - "const": "certManager" - } - } - } - } - } - } - }, - "then": { - "required": [ - "certManager" - ] - } - } - ], - "properties": 
{ - "baseDomain": { + "volumeType": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" - }, - "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" - }, - "dns": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" - }, - "externalDns": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS" - }, - "forecastle": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" - }, - "nginx": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "enum": [ + "gp2", + "gp3", + "io1", + "standard" + ] }, - "overrides": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides" + "maxPods": { + "type": "integer" } }, "required": [ - "certManager", - "externalDns", - "baseDomain", - "nginx" - ], - "type": "object" + "type" + ] }, - "Spec.Distribution.Modules.Ingress.CertManager": { + "Spec.Kubernetes.NodePool.Size": { "type": "object", "additionalProperties": false, "properties": { - "clusterIssuer": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" + "min": { + "type": "integer", + "minimum": 0, + "description": "The minimum number of nodes in the node pool" }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "max": { + "type": "integer", + "minimum": 0, + "description": "The maximum number of nodes in the node pool" } }, "required": [ - "clusterIssuer" + "max", + "min" ] }, - "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { + "type": "object", "additionalProperties": false, - "oneOf": [ - { - "required": [ - "type" - ] + "properties": { + "cidrBlocks": { + "type": "array", + "items": { + "$ref": 
"#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" + }, + "minItems": 1, + "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." }, - { - "required": [ - "solvers" - ] + "sourceSecurityGroupId": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId" + }, + "minItems": 1 + }, + "self": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self" + }, + "minItems": 1 } - ], + } + }, + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": { + "type": "object", + "additionalProperties": false, "properties": { - "email": { - "type": "string", - "format": "email", - "description": "The email of the cluster issuer" - }, "name": { + "type": "string" + }, + "type": { "type": "string", - "description": "The name of the cluster issuer" + "enum": [ + "ingress", + "egress" + ] }, - "route53": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" + "tags": { + "$ref": "#/$defs/Types.AwsTags" }, - "solvers": { + "cidrBlocks": { "type": "array", - "description": "The custom solvers configurations" + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 1 }, - "type": { - "type": "string", - "enum": [ - "dns01", - "http01" - ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol" + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" } }, "required": [ - "route53", + "cidrBlocks", "name", - "email" - ], - "type": "object" + "ports", + "protocol", + "type" + ] }, - "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": { "type": "object", "additionalProperties": false, "properties": { - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + 
"name": { + "type": "string", + "description": "The name of the FW rule" }, - "region": { - "$ref": "#/$defs/Types.AwsRegion" + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ], + "description": "The type of the FW rule can be ingress or egress" }, - "hostedZoneId": { - "type": "string" + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "The tags of the FW rule" + }, + "sourceSecurityGroupId": { + "type": "string", + "description": "The source security group ID" + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol", + "description": "The protocol of the FW rule" + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" } }, "required": [ - "hostedZoneId", - "iamRoleArn", - "region" + "sourceSecurityGroupId", + "name", + "ports", + "protocol", + "type" ] }, - "Spec.Distribution.Modules.Ingress.DNS": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": { "type": "object", "additionalProperties": false, "properties": { - "public": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public" + "name": { + "type": "string", + "description": "The name of the FW rule" }, - "private": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ], + "description": "The type of the FW rule can be ingress or egress" }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Ingress.DNS.Private": { - "additionalProperties": false, - "properties": { - "create": { + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "The tags of the FW rule" + }, + "self": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "If true, the source will be the security group itself" }, - "name": { - "type": "string", - "description": "The name of the private hosted zone" + "protocol": { + "$ref": 
"#/$defs/Types.AwsIpProtocol", + "description": "The protocol of the FW rule" }, - "vpcId": { - "type": "string" + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" } }, "required": [ - "vpcId", + "self", "name", - "create" - ], - "type": "object" + "ports", + "protocol", + "type" + ] }, - "Spec.Distribution.Modules.Ingress.DNS.Public": { + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", "additionalProperties": false, "properties": { - "name": { - "type": "string", - "description": "The name of the public hosted zone" + "from": { + "$ref": "#/$defs/Types.TcpPort" }, - "create": { - "type": "boolean", - "description": "If true, the public hosted zone will be created" + "to": { + "$ref": "#/$defs/Types.TcpPort" } }, "required": [ - "name", - "create" + "from", + "to" ] }, - "Spec.Distribution.Modules.Ingress.ExternalDNS": { + "Spec.Kubernetes.AwsAuth": { "type": "object", "additionalProperties": false, "properties": { - "privateIamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "additionalAccounts": { + "type": "array", + "items": { + "type": "string" + }, + "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" }, - "publicIamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "users": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" + }, + "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" + }, + "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" } - }, - "required": [ - "privateIamRoleArn", - "publicIamRoleArn" - ] + } }, - "Spec.Distribution.Modules.Ingress.Forecastle": { + "Spec.Kubernetes.AwsAuth.Role": { "type": "object", "additionalProperties": false, "properties": { - "overrides": { - "$ref": 
"#/$defs/Types.FuryModuleComponentOverrides" + "username": { + "type": "string" + }, + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "rolearn": { + "$ref": "#/$defs/Types.AwsArn" } - } + }, + "required": [ + "groups", + "rolearn", + "username" + ] }, - "Spec.Distribution.Modules.Ingress.Nginx": { + "Spec.Kubernetes.AwsAuth.User": { "type": "object", "additionalProperties": false, "properties": { - "type": { - "type": "string", - "enum": [ - "none", - "single", - "dual" - ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "username": { + "type": "string" }, - "tls": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" + "groups": { + "type": "array", + "items": { + "type": "string" + } }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "userarn": { + "$ref": "#/$defs/Types.AwsArn" } }, "required": [ - "type" + "groups", + "userarn", + "username" ] }, - "Spec.Distribution.Modules.Ingress.Nginx.TLS": { + "Spec.Distribution": { "type": "object", "additionalProperties": false, "properties": { - "provider": { - "type": "string", - "enum": [ - "certManager", - "secret", - "none" - ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "common": { + "$ref": "#/$defs/Spec.Distribution.Common" }, - "secret": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" + "modules": { + "$ref": "#/$defs/Spec.Distribution.Modules" + }, + "customPatches": { + "$ref": "../public/spec-distribution-custompatches.json" } }, "required": [ - "provider" + "modules" ], "if": { + "allOf": [ + { + "required": [ + "common" + ] + }, + { + "properties": { + "common": { + "required": [ + "provider" + ] + } + } + }, + { + "properties": { + "common": { + "properties": { + "provider": { + "required": [ + "type" + ] + } + } + } + } + }, + { + "properties": { + "common": { + "properties": { + 
"provider": { + "properties": { + "type": { + "const": "eks" + } + } + } + } + } + } + } + ] + }, + "then": { "properties": { - "provider": { - "const": "secret" + "modules": { + "required": [ + "aws" + ] } } }, - "then": { - "required": [ - "secret" - ] + "else": { + "properties": { + "modules": { + "properties": { + "aws": { + "type": "null" + } + } + } + } } }, - "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { + "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, "properties": { - "cert": { - "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for all the KFD modules" }, - "key": { - "type": "string" + "tolerations": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for all the KFD modules" }, - "ca": { - "type": "string" - } - }, - "required": [ - "ca", - "cert", - "key" - ] + "provider": { + "$ref": "#/$defs/Spec.Distribution.Common.Provider" + }, + "relativeVendorPath": { + "type": "string", + "description": "The relative path to the vendor directory, does not need to be changed" + }, + "registry": { + "type": "string", + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." 
+ } + } }, - "Spec.Distribution.Modules.Ingress.Overrides": { + "Spec.Distribution.Common.Provider": { "type": "object", "additionalProperties": false, "properties": { - "ingresses": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" - }, - "nodeSelector": { - "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" - }, - "tolerations": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.KubeToleration" - }, - "description": "The tolerations that will be added to the pods for the ingress module" + "type": { + "type": "string", + "description": "The type of the provider, must be EKS if specified" } - } + }, + "required": [ + "type" + ] }, - "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": { + "Spec.Distribution.Modules": { "type": "object", "additionalProperties": false, "properties": { - "forecastle": { - "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + "auth": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth" + }, + "aws": { + "$ref": "#/$defs/Spec.Distribution.Modules.Aws" + }, + "dr": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr" + }, + "ingress": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress" + }, + "logging": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging" + }, + "monitoring": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring" + }, + "tracing": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing" + }, + "networking": { + "$ref": "#/$defs/Spec.Distribution.Modules.Networking" + }, + "policy": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy" } - } + }, + "required": [ + "dr", + "ingress", + "logging", + "policy" + ] }, - "Spec.Distribution.Modules.Logging": { + "Spec.Distribution.Modules.Ingress": { "type": "object", "additionalProperties": false, "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides" }, - "type": { + 
"baseDomain": { "type": "string", - "enum": [ - "none", - "opensearch", - "loki", - "customOutputs" - ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." - }, - "opensearch": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" + "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" }, - "loki": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Loki" + "nginx": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", + "description": "Configurations for the nginx ingress controller module" }, - "cerebro": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Cerebro" + "certManager": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" }, - "minio": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Minio" + "dns": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" }, - "operator": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Operator" + "forecastle": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" }, - "customOutputs": { - "$ref": "#/$defs/Spec.Distribution.Modules.Logging.CustomOutputs" + "externalDns": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS" } }, "required": [ - "type" + "certManager", + "externalDns", + "baseDomain", + "nginx" ], "allOf": [ { "if": { "properties": { - "type": { - "const": "opensearch" + "nginx": { + "properties": { + 
"type": { + "const": "dual" + } + } } } }, "then": { "required": [ - "opensearch" - ] + "dns" + ], + "properties": { + "dns": { + "required": [ + "public", + "private" + ] + } + } } }, { "if": { "properties": { - "type": { - "const": "loki" + "nginx": { + "properties": { + "type": { + "const": "single" + } + } } } }, "then": { "required": [ - "loki" - ] + "dns" + ], + "properties": { + "dns": { + "required": [ + "public" + ] + } + } } }, { "if": { "properties": { - "type": { - "const": "customOutputs" + "nginx": { + "properties": { + "tls": { + "properties": { + "provider": { + "const": "certManager" + } + } + } + } } } }, "then": { "required": [ - "customOutputs" + "certManager" ] } } ] }, - "Spec.Distribution.Modules.Logging.Cerebro": { + "Spec.Distribution.Modules.Ingress.Overrides": { + "type": "object", + "additionalProperties": false, + "properties": { + "ingresses": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" + }, + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the ingress module" + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for the ingress module" + } + } + }, + "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": { + "type": "object", + "additionalProperties": false, + "properties": { + "forecastle": { + "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + } + } + }, + "Spec.Distribution.Modules.Ingress.Forecastle": { "type": "object", "additionalProperties": false, "properties": { @@ -1120,446 +1229,235 @@ } } }, - "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + 
"Spec.Distribution.Modules.Ingress.Nginx": { "type": "object", "additionalProperties": false, "properties": { - "audit": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "events": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "infra": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "ingressNginx": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "kubernetes": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." - }, - "systemdCommon": { + "type": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "enum": [ + "none", + "single", + "dual" + ], + "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" }, - "systemdEtcd": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "tls": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" }, - "errors": { - "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "audit", - "events", - "infra", - "ingressNginx", - "kubernetes", - "systemdCommon", - "systemdEtcd", - "errors" + "type" ] }, - "Spec.Distribution.Modules.Logging.Loki": { + "Spec.Distribution.Modules.Ingress.Nginx.TLS": { "type": "object", "additionalProperties": false, "properties": { - "backend": { + "provider": { "type": "string", "enum": [ - "minio", - "externalEndpoint" - ] - }, - "externalEndpoint": { - "type": "object", - "additionalProperties": false, - "properties": { - "endpoint": { - "type": "string", - "description": "The endpoint of the loki external endpoint" - }, - "insecure": { - "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" - }, - "secretAccessKey": { - "type": "string", - "description": "The secret access key of the loki external endpoint" - }, - "accessKeyId": { - "type": "string", - "description": "The access key id of the loki external endpoint" - }, - "bucketName": { - "type": "string", - "description": "The bucket name of the loki external endpoint" - } - } - }, - "tsdbStartDate": { - "type": 
"string", - "format": "date", - "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." + "certManager", + "secret", + "none" + ], + "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" }, - "resources": { - "$ref": "#/$defs/Types.KubeResources" + "secret": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" } }, "required": [ - "tsdbStartDate" - ] - }, - "Spec.Distribution.Modules.Logging.Minio": { - "type": "object", - "additionalProperties": false, - "properties": { - "storageSize": { - "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" - }, - "rootUser": { - "type": "object", - "additionalProperties": false, - "properties": { - "username": { - "type": "string", - "description": "The username of the minio root user" - }, - "password": { - "type": "string", - "description": "The password of the minio root user" - } + "provider" + ], + "if": { + "properties": { + "provider": { + "const": "secret" } - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } + }, + "then": { + "required": [ + "secret" + ] } }, - "Spec.Distribution.Modules.Logging.Opensearch": { + "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, "properties": { - "type": { + "cert": { "type": "string", - "enum": [ - "single", - "triple" - ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" - }, - "resources": { - "$ref": 
"#/$defs/Types.KubeResources" + "description": "The certificate file content or you can use the file notation to get the content from a file" }, - "storageSize": { - "type": "string", - "description": "The storage size for the opensearch pods" + "key": { + "type": "string" }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "ca": { + "type": "string" } }, "required": [ - "type" + "ca", + "cert", + "key" ] }, - "Spec.Distribution.Modules.Logging.Operator": { + "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, "properties": { + "clusterIssuer": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" + }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - } + }, + "required": [ + "clusterIssuer" + ] }, - "Spec.Distribution.Modules.Monitoring": { + "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", "properties": { + "name": { + "type": "string", + "description": "The name of the cluster issuer" + }, + "email": { + "type": "string", + "format": "email", + "description": "The email of the cluster issuer" + }, "type": { "type": "string", "enum": [ - "none", - "prometheus", - "prometheusAgent", - "mimir" + "dns01", + "http01" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed 
to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" - }, - "prometheus": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Prometheus" - }, - "prometheusAgent": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.PrometheusAgent" - }, - "alertmanager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.AlertManager" - }, - "grafana": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Grafana" - }, - "blackboxExporter": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.BlackboxExporter" - }, - "kubeStateMetrics": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.KubeStateMetrics" - }, - "x509Exporter": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.X509Exporter" + "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" }, - "mimir": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Mimir" + "solvers": { + "type": "array", + "description": "The custom solvers configurations" }, - "minio": { - "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Minio" + "route53": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" } }, "required": [ - "type" + "route53", + "name", + "email" + ], + "oneOf": [ + { + "required": [ + "type" + ] + }, + { + "required": [ + "solvers" + ] + } ] }, - "Spec.Distribution.Modules.Monitoring.AlertManager": { + "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", "additionalProperties": false, "properties": { - "deadManSwitchWebhookUrl": { - "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with 
healthchecks.io" + "public": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public" }, - "installDefaultRules": { - "type": "boolean", - "description": "If true, the default rules will be installed" + "private": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" }, - "slackWebhookUrl": { - "type": "string", - "description": "The slack webhook url to send alerts" - } - } - }, - "Spec.Distribution.Modules.Monitoring.BlackboxExporter": { - "type": "object", - "additionalProperties": false, - "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } } }, - "Spec.Distribution.Modules.Monitoring.Grafana": { + "Spec.Distribution.Modules.Ingress.DNS.Public": { "type": "object", "additionalProperties": false, "properties": { - "usersRoleAttributePath": { + "name": { "type": "string", - "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)." + "description": "The name of the public hosted zone" }, - "basicAuthIngress": { + "create": { "type": "boolean", - "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. It's intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled." 
- }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "description": "If true, the public hosted zone will be created" } - } + }, + "required": [ + "name", + "create" + ] }, - "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": { + "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", "additionalProperties": false, "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "name": { + "type": "string", + "description": "The name of the private hosted zone" + }, + "create": { + "type": "boolean", + "description": "If true, the private hosted zone will be created" + }, + "vpcId": { + "type": "string" } - } + }, + "required": [ + "vpcId", + "name", + "create" + ] }, - "Spec.Distribution.Modules.Monitoring.Mimir": { + "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, "properties": { - "retentionTime": { - "type": "string", - "description": "The retention time for the mimir pods" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, - "backend": { + "type": { "type": "string", "enum": [ - "minio", - "externalEndpoint" + "none", + "opensearch", + "loki", + "customOutputs" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" - }, - "externalEndpoint": { - "type": "object", - "additionalProperties": false, - "properties": { - "endpoint": { - "type": "string", - "description": "The endpoint of the external mimir backend" - }, - "insecure": { - "type": "boolean", - "description": "If true, the external mimir backend will not use tls" - }, - "secretAccessKey": { - "type": "string", - "description": "The secret access key of the external mimir backend" - }, - "accessKeyId": { - "type": "string", - "description": "The access key id of the external mimir backend" - }, - "bucketName": { - "type": "string", - "description": "The bucket name of the external mimir backend" - } - } - }, - "overrides": { - "$ref": 
"#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Monitoring.Minio": { - "type": "object", - "additionalProperties": false, - "properties": { - "storageSize": { - "type": "string", - "description": "The storage size for the minio pods" - }, - "rootUser": { - "type": "object", - "additionalProperties": false, - "properties": { - "username": { - "type": "string", - "description": "The username for the minio root user" - }, - "password": { - "type": "string", - "description": "The password for the minio root user" - } - } - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Monitoring.Prometheus": { - "type": "object", - "additionalProperties": false, - "properties": { - "resources": { - "$ref": "#/$defs/Types.KubeResources" - }, - "retentionTime": { - "type": "string", - "description": "The retention time for the k8s Prometheus instance." - }, - "retentionSize": { - "type": "string", - "description": "The retention size for the k8s Prometheus instance." - }, - "storageSize": { - "type": "string", - "description": "The storage size for the k8s Prometheus instance." - }, - "remoteWrite": { - "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", - "type": "array", - "items": { - "type": "object" - } - } - } - }, - "Spec.Distribution.Modules.Monitoring.PrometheusAgent": { - "type": "object", - "additionalProperties": false, - "properties": { - "resources": { - "$ref": "#/$defs/Types.KubeResources" + "description": "selects the logging stack. Choosing none will disable the centralized logging. 
Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." }, - "remoteWrite": { - "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", - "type": "array", - "items": { - "type": "object" - } - } - } - }, - "Spec.Distribution.Modules.Monitoring.X509Exporter": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Networking": { - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "opensearch": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" }, - "tigeraOperator": { - "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" + "loki": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Loki" }, - "type": { - "type": "string", - "enum": [ - "none" - ] - } - }, - "type": "object" - }, - "Spec.Distribution.Modules.Networking.TigeraOperator": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Policy": { - "type": "object", - "additionalProperties": false, 
- "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" + "cerebro": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Cerebro" }, - "type": { - "type": "string", - "enum": [ - "none", - "gatekeeper", - "kyverno" - ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Minio" }, - "gatekeeper": { - "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" + "operator": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Operator" }, - "kyverno": { - "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno" + "customOutputs": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.CustomOutputs" } }, "required": [ @@ -1570,13 +1468,13 @@ "if": { "properties": { "type": { - "const": "gatekeeper" + "const": "opensearch" } } }, "then": { "required": [ - "gatekeeper" + "opensearch" ] } }, @@ -1584,116 +1482,75 @@ "if": { "properties": { "type": { - "const": "kyverno" + "const": "loki" } } }, "then": { "required": [ - "kyverno" + "loki" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "customOutputs" + } + } + }, + "then": { + "required": [ + "customOutputs" ] } } ] }, - "Spec.Distribution.Modules.Policy.Gatekeeper": { + "Spec.Distribution.Modules.Logging.Opensearch": { "type": "object", "additionalProperties": false, "properties": { - "additionalExcludedNamespaces": { - "type": "array", - "items": { - "type": "string" - }, - "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them." 
- }, - "enforcementAction": { + "type": { "type": "string", "enum": [ - "deny", - "dryrun", - "warn" + "single", + "triple" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The type of the opensearch, must be ***single*** or ***triple***" }, - "installDefaultPolicies": { - "type": "boolean", - "description": "If true, the default policies will be installed" + "resources": { + "$ref": "#/$defs/Types.KubeResources" + }, + "storageSize": { + "type": "string", + "description": "The storage size for the opensearch pods" }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "enforcementAction", - "installDefaultPolicies" + "type" ] }, - "Spec.Distribution.Modules.Policy.Kyverno": { + "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", "additionalProperties": false, "properties": { - "additionalExcludedNamespaces": { - "type": "array", - "items": { - "type": "string" - }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." 
- }, - "validationFailureAction": { - "type": "string", - "enum": [ - "Audit", - "Enforce" - ], - "description": "The validation failure action to use for the kyverno module" - }, - "installDefaultPolicies": { - "type": "boolean", - "description": "If true, the default policies will be installed" - }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "validationFailureAction", - "installDefaultPolicies" - ] + } }, - "Spec.Distribution.Modules.Tracing": { - "type": "object", - "additionalProperties": false, - "properties": { - "overrides": { - "$ref": "#/$defs/Types.FuryModuleOverrides" - }, - "type": { - "type": "string", - "enum": [ - "none", - "tempo" - ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" - }, - "tempo": { - "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" - }, - "minio": { - "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio" - } - }, - "required": [ - "type" - ] - }, - "Spec.Distribution.Modules.Tracing.Minio": { + "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each minio disk, 6 disks total" }, "rootUser": { "type": "object", @@ -1701,11 +1558,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username of the minio root user" }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password of the minio root user" } } }, @@ -1714,21 +1571,16 @@ } } }, - "Spec.Distribution.Modules.Tracing.Tempo": { + "Spec.Distribution.Modules.Logging.Loki": { "type": "object", "additionalProperties": false, "properties": { - "retentionTime": { - "type": "string", - "description": "The retention time for the tempo pods" - }, "backend": { "type": "string", 
"enum": [ "minio", "externalEndpoint" - ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + ] }, "externalEndpoint": { "type": "object", @@ -1736,836 +1588,981 @@ "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The endpoint of the loki external endpoint" }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, the loki external endpoint will be insecure" }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key of the loki external endpoint" }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key id of the loki external endpoint" }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the loki external endpoint" } } }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." 
+ }, + "resources": { + "$ref": "#/$defs/Types.KubeResources" } - } + }, + "required": [ + "tsdbStartDate" + ] }, - "Spec.Infrastructure": { + "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, "properties": { - "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" - }, - "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" - } - }, - "allOf": [ - { - "if": { - "allOf": [ - { - "properties": { - "vpc": { - "type": "null" - } - } - }, - { - "not": { - "properties": { - "vpn": { - "type": "null" - } - } - } - } - ] - }, - "then": { - "properties": { - "vpn": { - "required": [ - "vpcId" - ] - } - } - } - }, - { - "if": { - "allOf": [ - { - "not": { - "properties": { - "vpc": { - "type": "null" - } - } - } - }, - { - "not": { - "properties": { - "vpn": { - "properties": { - "vpcId": { - "type": "null" - } - } - } - } - } - } - ] - }, - "then": { - "properties": { - "vpn": { - "properties": { - "vpcId": { - "type": "null" - } - } - } - } - } + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - ] + } }, - "Spec.Infrastructure.Vpc": { + "Spec.Distribution.Modules.Logging.CustomOutputs": { + "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { - "network": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network" + "audit": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ }, + "events": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "infra": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "ingressNginx": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "kubernetes": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "systemdCommon": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "systemdEtcd": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "errors": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." } }, "required": [ - "network" + "audit", + "events", + "infra", + "ingressNginx", + "kubernetes", + "systemdCommon", + "systemdEtcd", + "errors" ] }, - "Spec.Infrastructure.Vpc.Network": { + "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, + "description": "configuration for the Monitoring module components", "properties": { - "cidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" + "type": { + "type": "string", + "enum": [ + "none", + "prometheus", + "prometheusAgent", + "mimir" + ], + "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
}, - "subnetsCidrs": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "prometheus": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Prometheus" + }, + "prometheusAgent": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.PrometheusAgent" + }, + "alertmanager": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.AlertManager" + }, + "grafana": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Grafana" + }, + "blackboxExporter": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.BlackboxExporter" + }, + "kubeStateMetrics": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.KubeStateMetrics" + }, + "x509Exporter": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.X509Exporter" + }, + "mimir": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Mimir" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Minio" } }, "required": [ - "cidr", - "subnetsCidrs" + "type" ] }, - "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { + "Spec.Distribution.Modules.Monitoring.Prometheus": { "type": "object", "additionalProperties": false, "properties": { - "private": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "resources": { + "$ref": "#/$defs/Types.KubeResources" }, - "public": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "retentionTime": { + "type": "string", + "description": "The retention time for the k8s Prometheus instance." + }, + "retentionSize": { + "type": "string", + "description": "The retention size for the k8s Prometheus instance." 
+ }, + "storageSize": { + "type": "string", + "description": "The storage size for the k8s Prometheus instance." + }, + "remoteWrite": { + "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", + "type": "array", + "items": { + "type": "object" + } } - }, - "required": [ - "private", - "public" - ] + } }, - "Spec.Infrastructure.Vpn": { + "Spec.Distribution.Modules.Monitoring.PrometheusAgent": { "type": "object", "additionalProperties": false, "properties": { - "instances": { - "type": "integer", - "description": "The number of instances to create, 0 to skip the creation" - }, - "port": { - "$ref": "#/$defs/Types.TcpPort", - "description": "The port used by the OpenVPN server" + "resources": { + "$ref": "#/$defs/Types.KubeResources" }, - "instanceType": { + "remoteWrite": { + "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "Spec.Distribution.Modules.Monitoring.AlertManager": { + "type": "object", + "additionalProperties": false, + "properties": { + "deadManSwitchWebhookUrl": { "type": "string", - "description": "The size of the AWS EC2 instance" + "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" }, - "diskSize": { - "type": "integer", - "description": "The size of the disk in GB" + "installDefaultRules": { + "type": "boolean", + "description": "If true, the default rules will be installed" }, - "operatorName": { + "slackWebhookUrl": { "type": "string", - "description": "The username of the account to create in the bastion's operating system" + "description": "The slack webhook url to send alerts" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Grafana": { + "type": "object", + "additionalProperties": false, + "properties": { + "usersRoleAttributePath": { + "type": "string", + "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)." 
}, - "dhParamsBits": { - "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + "basicAuthIngress": { + "type": "boolean", + "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. It's intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled." }, - "vpnClientsSubnetCidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.BlackboxExporter": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.X509Exporter": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Mimir": { + "type": "object", + "additionalProperties": false, + "properties": { + "retentionTime": { + "type": "string", + "description": "The retention time for the mimir pods" }, - "ssh": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" + "backend": { + "type": "string", + "enum": [ + "minio", + "externalEndpoint" + ], + "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" }, - "vpcId": { - "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be 
created, required only if .spec.infrastructure.vpc is omitted" + "externalEndpoint": { + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + "description": "The endpoint of the external mimir backend" + }, + "insecure": { + "type": "boolean", + "description": "If true, the external mimir backend will not use tls" + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key of the external mimir backend" + }, + "accessKeyId": { + "type": "string", + "description": "The access key id of the external mimir backend" + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the external mimir backend" + } + } }, - "bucketNamePrefix": { - "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Minio": { + "type": "object", + "additionalProperties": false, + "properties": { + "storageSize": { + "type": "string", + "description": "The storage size for the minio pods" }, - "iamUserNameOverride": { - "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the minio root user" + }, + "password": { + "type": "string", + "description": "The password for the minio root user" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } - }, - "required": [ - "ssh", - "vpnClientsSubnetCidr" - ] + } }, - "Spec.Infrastructure.Vpn.Ssh": { + "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, "properties": { - "publicKeys": { - "type": "array", - "items": { - "anyOf": 
[ - { - "$ref": "#/$defs/Types.SshPubKey" - }, - { - "$ref": "#/$defs/Types.FileRef" - } - ] - }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, - "githubUsersName": { - "type": "array", - "items": { - "type": "string" - }, - "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "type": { + "type": "string", + "enum": [ + "none", + "tempo" + ], + "description": "The type of tracing to use, either ***none*** or ***tempo***" }, - "allowedFromCidrs": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "tempo": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio" } }, "required": [ - "allowedFromCidrs", - "githubUsersName" + "type" ] }, - "Spec.Kubernetes": { + "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, "properties": { - "vpcId": { - "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" - }, - "clusterIAMRoleNamePrefixOverride": { - "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" - }, - "workersIAMRoleNamePrefixOverride": { - "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" - }, - "subnetIds": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.AwsSubnetId" - }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if 
.spec.infrastructure.vpc is omitted" - }, - "apiServer": { - "$ref": "#/$defs/Spec.Kubernetes.APIServer" + "retentionTime": { + "type": "string", + "description": "The retention time for the tempo pods" }, - "serviceIpV4Cidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "backend": { + "type": "string", + "enum": [ + "minio", + "externalEndpoint" + ], + "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" }, - "nodeAllowedSshPublicKey": { - "anyOf": [ - { - "$ref": "#/$defs/Types.AwsSshPubKey" + "externalEndpoint": { + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + "description": "The endpoint of the external tempo backend" }, - { - "$ref": "#/$defs/Types.FileRef" + "insecure": { + "type": "boolean", + "description": "If true, the external tempo backend will not use tls" + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key of the external tempo backend" + }, + "accessKeyId": { + "type": "string", + "description": "The access key id of the external tempo backend" + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the external tempo backend" } - ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + } }, - "nodePoolsLaunchKind": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Tracing.Minio": { + "type": "object", + "additionalProperties": false, + "properties": { + "storageSize": { "type": "string", - "enum": [ - "launch_configurations", - "launch_templates", - "both" - ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. 
For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "The storage size for the minio pods" }, - "logRetentionDays": { - "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the minio root user" + }, + "password": { + "type": "string", + "description": "The password for the minio root user" + } + } }, - "logsTypes": { - "type": "array", - "items": { - "type": "string", - "enum": [ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler" - ] - }, - "minItems": 0, - "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types." + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Networking": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" }, - "nodePools": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool" - } + "tigeraOperator": { + "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" }, - "awsAuth": { - "$ref": "#/$defs/Spec.Kubernetes.AwsAuth" + "type": { + "type": "string", + "enum": [ + "none" + ] } - }, - "required": [ - "apiServer", - "nodeAllowedSshPublicKey", - "nodePools", - "nodePoolsLaunchKind" - ] + } }, - "Spec.Kubernetes.APIServer": { + "Spec.Distribution.Modules.Networking.TigeraOperator": { "type": "object", "additionalProperties": false, "properties": { - "privateAccess": { - "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "overrides": { + "$ref": 
"#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Policy": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, - "privateAccessCidrs": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "type": { + "type": "string", + "enum": [ + "none", + "gatekeeper", + "kyverno" + ], + "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" }, - "publicAccessCidrs": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "gatekeeper": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" }, - "publicAccess": { - "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "kyverno": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno" } }, "required": [ - "privateAccess", - "publicAccess" + "type" + ], + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "gatekeeper" + } + } + }, + "then": { + "required": [ + "gatekeeper" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "kyverno" + } + } + }, + "then": { + "required": [ + "kyverno" + ] + } + } ] }, - "Spec.Kubernetes.AwsAuth": { + "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, "properties": { - "additionalAccounts": { + "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" - }, - "users": { - "type": "array", - "items": { - "$ref": 
"#/$defs/Spec.Kubernetes.AwsAuth.User" - }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them." }, - "roles": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" - }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" - } - } - }, - "Spec.Kubernetes.AwsAuth.Role": { - "type": "object", - "additionalProperties": false, - "properties": { - "username": { - "type": "string" + "enforcementAction": { + "type": "string", + "enum": [ + "deny", + "dryrun", + "warn" + ], + "description": "The enforcement action to use for the gatekeeper module" }, - "groups": { - "type": "array", - "items": { - "type": "string" - } + "installDefaultPolicies": { + "type": "boolean", + "description": "If true, the default policies will be installed" }, - "rolearn": { - "$ref": "#/$defs/Types.AwsArn" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "groups", - "rolearn", - "username" + "enforcementAction", + "installDefaultPolicies" ] }, - "Spec.Kubernetes.AwsAuth.User": { + "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, "properties": { - "username": { - "type": "string" - }, - "groups": { + "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" - } - }, - "userarn": { - "$ref": "#/$defs/Types.AwsArn" - } - }, - "required": [ - "groups", - "userarn", - "username" - ] - }, - "Spec.Kubernetes.NodePool": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "eks-managed", - "self-managed" - ] - }, - "name": { - "type": "string", - "description": "The name of the node pool" - }, - "ami": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" + }, 
+ "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." }, - "containerRuntime": { + "validationFailureAction": { "type": "string", "enum": [ - "docker", - "containerd" + "Audit", + "Enforce" ], - "description": "The container runtime to use for the nodes" - }, - "size": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" - }, - "instance": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance" - }, - "attachedTargetGroups": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.AwsArn" - }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" - }, - "labels": { - "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" - }, - "taints": { - "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" - }, - "tags": { - "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "The validation failure action to use for the kyverno module" }, - "subnetIds": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.AwsSubnetId" - }, - "description": "This value defines the subnet IDs where the nodes will be created" + "installDefaultPolicies": { + "type": "boolean", + "description": "If true, the default policies will be installed" }, - "additionalFirewallRules": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "instance", - "name", - "size" + "validationFailureAction", + "installDefaultPolicies" ] }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": { + "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, "properties": { - "name": { - "type": "string" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" }, 
"type": { "type": "string", "enum": [ - "ingress", - "egress" - ] - }, - "tags": { - "$ref": "#/$defs/Types.AwsTags" - }, - "cidrBlocks": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "minItems": 1 - }, - "protocol": { - "$ref": "#/$defs/Types.AwsIpProtocol" + "none", + "eks" + ], + "description": "The type of the DR, must be ***none*** or ***eks***" }, - "ports": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + "velero": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" } }, "required": [ - "cidrBlocks", - "name", - "ports", - "protocol", "type" - ] - }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { - "type": "object", - "additionalProperties": false, - "properties": { - "from": { - "$ref": "#/$defs/Types.TcpPort" - }, - "to": { - "$ref": "#/$defs/Types.TcpPort" + ], + "if": { + "properties": { + "type": { + "const": "eks" + } } }, - "required": [ - "from", - "to" - ] + "then": { + "required": [ + "type", + "velero" + ] + } }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": { + "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, "properties": { - "name": { - "type": "string", - "description": "The name of the FW rule" - }, - "type": { - "type": "string", - "enum": [ - "ingress", - "egress" - ], - "description": "The type of the FW rule can be ingress or egress" - }, - "tags": { - "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" - }, - "self": { - "type": "boolean", - "description": "If true, the source will be the security group itself" + "schedules": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's backup schedules.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." 
+ }, + "cron": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's schedules cron.", + "properties": { + "manifests": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "full": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + } + } + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } }, - "protocol": { - "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "eks": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks" }, - "ports": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "self", - "name", - "ports", - "protocol", - "type" + "eks" ] }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": { + "Spec.Distribution.Modules.Dr.Velero.Eks": { "type": "object", "additionalProperties": false, "properties": { - "name": { - "type": "string", - "description": "The name of the FW rule" + "region": { + "$ref": "#/$defs/Types.AwsRegion", + "description": "The region where the velero bucket is located" }, - "type": { - "type": "string", - "enum": [ - "ingress", - "egress" - ], - "description": "The type of the FW rule can be ingress or egress" + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "maxLength": 49, + "description": "The name of the velero bucket" }, - "tags": { - "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + } + }, + "required": [ + "iamRoleArn", + "region", + "bucketName" + ] + }, 
+ "Spec.Distribution.Modules.Auth": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" }, - "sourceSecurityGroupId": { + "provider": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider" + }, + "baseDomain": { "type": "string", - "description": "The source security group ID" + "description": "The base domain for the auth module" }, - "protocol": { - "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "pomerium": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" }, - "ports": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + "dex": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex" } }, "required": [ - "sourceSecurityGroupId", - "name", - "ports", - "protocol", - "type" + "provider" + ], + "allOf": [ + { + "if": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "sso" + } + } + } + } + }, + "then": { + "required": [ + "dex", + "pomerium", + "baseDomain" + ] + }, + "else": { + "properties": { + "dex": { + "type": "null" + }, + "pomerium": { + "type": "null" + } + } + } + }, + { + "if": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "basicAuth" + } + } + } + } + }, + "then": { + "properties": { + "provider": { + "required": [ + "basicAuth" + ] + } + } + }, + "else": { + "properties": { + "provider": { + "basicAuth": { + "type": "null" + } + } + } + } + } ] }, - "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { + "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, "properties": { - "cidrBlocks": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" - }, - "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." 
+ "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the auth module" }, - "sourceSecurityGroupId": { - "type": "array", + "tolerations": { + "type": [ + "array", + "null" + ], "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId" + "$ref": "#/$defs/Types.KubeToleration" }, - "minItems": 1 + "description": "The tolerations that will be added to the pods for the auth module" }, - "self": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self" - }, - "minItems": 1 + "ingresses": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" + } } } }, - "Spec.Kubernetes.NodePool.Ami": { + "Spec.Distribution.Modules.Auth.Overrides.Ingress": { "type": "object", "additionalProperties": false, "properties": { - "id": { + "host": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "The host of the ingress" }, - "owner": { + "ingressClass": { "type": "string", - "description": "The owner of the AMI" + "description": "The ingress class of the ingress" } }, "required": [ - "id", - "owner" + "host", + "ingressClass" ] }, - "Spec.Kubernetes.NodePool.Instance": { + "Spec.Distribution.Modules.Auth.Provider": { "type": "object", "additionalProperties": false, "properties": { "type": { - "type": "string", - "description": "The instance type to use for the nodes" - }, - "spot": { - "type": "boolean", - "description": "If true, the nodes will be created as spot instances" - }, - "volumeSize": { - "type": "integer", - "description": "The size of the disk in GB" - }, - "volumeType": { "type": "string", "enum": [ - "gp2", - "gp3", - "io1", - "standard" - ] + "none", + "basicAuth", + "sso" + ], + "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" }, - "maxPods": { - "type": "integer" + 
"basicAuth": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" } }, "required": [ "type" ] }, - "Spec.Kubernetes.NodePool.Size": { + "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, "properties": { - "min": { - "type": "integer", - "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "username": { + "type": "string", + "description": "The username for the basic auth" }, - "max": { - "type": "integer", - "minimum": 0, - "description": "The maximum number of nodes in the node pool" - } - }, - "required": [ - "max", - "min" - ] - }, - "Spec.ToolsConfiguration": { - "type": "object", - "additionalProperties": false, - "properties": { - "terraform": { - "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform" - } - }, - "required": [ - "terraform" - ] - }, - "Spec.ToolsConfiguration.Terraform": { - "type": "object", - "additionalProperties": false, - "properties": { - "state": { - "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State" + "password": { + "type": "string", + "description": "The password for the basic auth" } }, "required": [ - "state" + "username", + "password" ] }, - "Spec.ToolsConfiguration.Terraform.State": { + "Spec.Distribution.Modules.Auth.Pomerium": { + "$ref": "../public/spec-distribution-modules-auth-pomerium.json" + }, + "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, "properties": { - "s3": { - "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" + "connectors": { + "type": "array", + "description": "The connectors for dex" + }, + "additionalStaticClients": { + "type": "array", + "description": "The additional static clients for dex" + }, + "expiry": { + "type": "object", + "additionalProperties": false, + "properties": { + "signingKeys": { + "type": "string", + "description": "Dex signing key expiration time duration (default 6h)." 
+ }, + "idTokens": { + "type": "string", + "description": "Dex ID tokens expiration time duration (default 24h)." + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" } }, "required": [ - "s3" + "connectors" ] }, - "Spec.ToolsConfiguration.Terraform.State.S3": { + "Spec.Distribution.Modules.Aws": { "type": "object", "additionalProperties": false, "properties": { - "bucketName": { - "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "clusterAutoscaler": { + "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler" }, - "keyPrefix": { - "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "ebsCsiDriver": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + } + }, + "required": [ + "iamRoleArn" + ] }, - "region": { - "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "loadBalancerController": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + } + }, + "required": [ + "iamRoleArn" + ] }, - "skipRegionValidation": { - "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "ebsSnapshotController": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" } }, "required": [ - "bucketName", 
- "keyPrefix", - "region" + "clusterAutoscaler", + "ebsCsiDriver", + "loadBalancerController", + "overrides" ] }, - "Types.AwsArn": { + "Types.SemVer": { "type": "string", - "pattern": "^arn:(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P(?P[^:\\/\\n]*)[:\\/])?(?P.*)$" + "pattern": "^v?(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)(?:-(?P(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" }, - "Types.AwsIamRoleName": { + "Types.IpAddress": { "type": "string", - "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$" + "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\b){4}$" }, - "Types.AwsIamRoleNamePrefix": { + "Types.Cidr": { "type": "string", - "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$" + "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$" }, - "Types.AwsIpProtocol": { + "Types.FileRef": { "type": "string", - "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$", - "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user friendly" + "pattern": "^\\{file\\:\\/\\/.+\\}$" + }, + "Types.EnvRef": { + "type": "string", + "pattern": "\\{^env\\:\\/\\/.*\\}$" + }, + "Types.TcpPort": { + "type": "integer", + "minimum": 0, + "maximum": 65535 + }, + "Types.SshPubKey": { + "type": "string", + "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+" + }, + "Types.Uri": { + "type": "string", + "pattern": "^(http|https)\\:\\/\\/.+$" + }, + "Types.AwsArn": { + "type": "string", + "pattern": "^arn:(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P[^:\\n]*):(?P(?P[^:\\/\\n]*)[:\\/])?(?P.*)$" }, "Types.AwsRegion": { "type": "string", @@ -2601,6 +2598,37 @@ "us-west-2" ] }, + "Types.AwsVpcId": { + "type": "string", + "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "Types.AwsSshPubKey": { + "type": "string", + "pattern": "^ssh\\-(ed25519|rsa)\\s+" + }, + "Types.AwsSubnetId": { + "type": "string", + 
"pattern": "^subnet\\-[0-9a-f]{17}$" + }, + "Types.AwsTags": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "Types.AwsIpProtocol": { + "type": "string", + "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$", + "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user friendly" + }, + "Types.AwsIamRoleNamePrefix": { + "type": "string", + "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$" + }, + "Types.AwsIamRoleName": { + "type": "string", + "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$" + }, "Types.AwsS3BucketName": { "type": "string", "allOf": [ @@ -2620,55 +2648,126 @@ { "pattern": "^[a-z0-9][a-z0-9-.]{1,35}[a-z0-9-.]$" }, - { - "not": { - "pattern": "^xn--|-s3alias$" + { + "not": { + "pattern": "^xn--|-s3alias$" + } + } + ] + }, + "Types.AwsS3KeyPrefix": { + "type": "string", + "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$", + "maxLength": 960 + }, + "Types.KubeLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "Types.KubeTaints": { + "type": "array", + "items": { + "type": "string", + "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$" + } + }, + "Types.KubeNodeSelector": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "Types.KubeToleration": { + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "operator": { + "type": "string", + "enum": [ + "Exists", + "Equal" + ] + }, + "key": { + "type": "string", + "description": "The key of the toleration" + }, + "value": { + "type": "string", + "description": "The value of the toleration" + } + }, + "required": [ + "effect", + "key" + ], + "anyOf": [ + { + "required": [ + "operator" + ] + }, + { + "required": [ + "value" + ] + } + ] + }, + "Types.KubeResources": { + "type": "object", + "additionalProperties": 
false, + "properties": { + "requests": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string", + "description": "The cpu request for the prometheus pods" + }, + "memory": { + "type": "string", + "description": "The memory request for the opensearch pods" + } + } + }, + "limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string", + "description": "The cpu limit for the opensearch pods" + }, + "memory": { + "type": "string", + "description": "The memory limit for the opensearch pods" + } } } - ] - }, - "Types.AwsS3KeyPrefix": { - "type": "string", - "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$", - "maxLength": 960 - }, - "Types.AwsSshPubKey": { - "type": "string", - "pattern": "^ssh\\-(ed25519|rsa)\\s+" - }, - "Types.AwsSubnetId": { - "type": "string", - "pattern": "^subnet\\-[0-9a-f]{17}$" - }, - "Types.AwsTags": { - "type": "object", - "additionalProperties": { - "type": "string" } }, - "Types.AwsVpcId": { - "type": "string", - "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$" - }, - "Types.Cidr": { - "type": "string", - "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$" - }, - "Types.EnvRef": { - "type": "string", - "pattern": "\\{^env\\:\\/\\/.*\\}$" - }, - "Types.FileRef": { - "type": "string", - "pattern": "^\\{file\\:\\/\\/.+\\}$" - }, - "Types.FuryModuleComponentOverrides": { + "Types.FuryModuleOverrides": { "type": "object", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "The node selector to use to place the pods for the dr module" }, "tolerations": { "type": [ @@ -2678,17 +2777,23 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "The tolerations that 
will be added to the pods for the monitoring module" + }, + "ingresses": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + } } } }, - "Types.FuryModuleComponentOverridesWithIAMRoleName": { + "Types.FuryModuleComponentOverrides": { "type": "object", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the minio module" }, "tolerations": { "type": [ @@ -2698,20 +2803,17 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" - }, - "iamRoleName": { - "$ref": "#/$defs/Types.AwsIamRoleName" + "description": "The tolerations that will be added to the pods for the cert-manager module" } } }, - "Types.FuryModuleOverrides": { + "Types.FuryModuleComponentOverridesWithIAMRoleName": { "type": "object", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "The node selector to use to place the pods for the load balancer controller module" }, "tolerations": { "type": [ @@ -2721,13 +2823,10 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module" }, - "ingresses": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/Types.FuryModuleOverridesIngress" - } + "iamRoleName": { + "$ref": "#/$defs/Types.AwsIamRoleName" } } }, @@ -2749,155 +2848,56 @@ } } }, - "Types.IpAddress": { - "type": "string", - "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\b){4}$" - }, 
- "Types.KubeLabels": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "Types.KubeNodeSelector": { - "type": [ - "object", - "null" - ], - "additionalProperties": { - "type": "string" - } - }, - "Types.KubeResources": { + "Spec.Distribution.Modules.Aws.ClusterAutoscaler": { "type": "object", "additionalProperties": false, "properties": { - "requests": { - "type": "object", - "additionalProperties": false, - "properties": { - "cpu": { - "type": "string", - "description": "The cpu request for the prometheus pods" - }, - "memory": { - "type": "string", - "description": "The memory request for the opensearch pods" - } - } + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" }, - "limits": { - "type": "object", - "additionalProperties": false, - "properties": { - "cpu": { - "type": "string", - "description": "The cpu limit for the opensearch pods" - }, - "memory": { - "type": "string", - "description": "The memory limit for the opensearch pods" - } - } + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" } - } - }, - "Types.KubeTaints": { - "type": "array", - "items": { - "type": "string", - "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$" - } + }, + "required": [ + "iamRoleArn" + ] }, - "Types.KubeToleration": { + "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": { "type": "object", "additionalProperties": false, "properties": { - "effect": { - "type": "string", - "enum": [ - "NoSchedule", - "PreferNoSchedule", - "NoExecute" - ] - }, - "operator": { - "type": "string", - "enum": [ - "Exists", - "Equal" - ] + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" }, - "key": { - "type": "string", - "description": "The key of the toleration" + "region": { + "$ref": "#/$defs/Types.AwsRegion" }, - "value": { - "type": "string", - "description": "The value of the toleration" + "hostedZoneId": { + "type": "string" } }, "required": [ - "effect", - "key" - ], - 
"anyOf": [ - { - "required": [ - "operator" - ] - }, - { - "required": [ - "value" - ] - } + "hostedZoneId", + "iamRoleArn", + "region" ] }, - "Types.SemVer": { - "type": "string", - "pattern": "^v?(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)\\.(?P0|[1-9]\\d*)(?:-(?P(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" - }, - "Types.SshPubKey": { - "type": "string", - "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+" - }, - "Types.TcpPort": { - "type": "integer", - "minimum": 0, - "maximum": 65535 - }, - "Types.Uri": { - "type": "string", - "pattern": "^(http|https)\\:\\/\\/.+$" - } - }, - "$schema": "http://json-schema.org/draft-07/schema#", - "additionalProperties": false, - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", - "properties": { - "apiVersion": { - "type": "string", - "pattern": "^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$" - }, - "kind": { - "type": "string", - "enum": [ - "EKSCluster" + "Spec.Distribution.Modules.Ingress.ExternalDNS": { + "type": "object", + "additionalProperties": false, + "properties": { + "privateIamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "publicIamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + } + }, + "required": [ + "privateIamRoleArn", + "publicIamRoleArn" ] - }, - "metadata": { - "$ref": "#/$defs/Metadata" - }, - "spec": { - "$ref": "#/$defs/Spec" } - }, - "required": [ - "apiVersion", - "kind", - "metadata", - "spec" - ], - "type": "object" + } } From 137a922c37bee8fbc4503b376c8e257678ffb1cf Mon Sep 17 00:00:00 2001 From: Alessio Dionisi Date: Tue, 19 Nov 2024 16:03:11 +0100 Subject: [PATCH 081/160] deps: use golang 1.23.3 also on CI --- .drone.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.drone.yml b/.drone.yml index 856500112..baf4cb444 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,13 +15,13 @@ clone: steps: - name: license-check - image: 
quay.io/sighup/golang:1.23.2 + image: quay.io/sighup/golang:1.23.3 pull: always commands: - make license-check - name: schema-check - image: quay.io/sighup/golang:1.23.2 + image: quay.io/sighup/golang:1.23.3 pull: always commands: - |- @@ -50,7 +50,7 @@ steps: - schema-check - name: lint-go - image: quay.io/sighup/golang:1.23.2 + image: quay.io/sighup/golang:1.23.3 pull: always commands: - make lint-go From d8cc889b9d41fa5c0dce7046d2232bc7c62926c4 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 19 Nov 2024 16:43:40 +0100 Subject: [PATCH 082/160] fix(network-policies): update policy names --- .../distribution/manifests/monitoring/policies/minio.yaml.tpl | 2 +- .../manifests/monitoring/policies/prometheus.yaml.tpl | 2 +- .../opa/policies/gatekeeper/controller-manager.yaml.tpl | 2 +- .../distribution/manifests/tracing/policies/tempo.yaml.tpl | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl index 14217ef2c..7fcce1a79 100644 --- a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl @@ -109,7 +109,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: miniomonitoring-egress-all + name: minio-monitoring-egress-all namespace: monitoring spec: policyTypes: diff --git a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl index 89365bb22..9d5fee209 100644 --- a/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/prometheus.yaml.tpl @@ -94,7 +94,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: prometheus-egress-kubeapiserver + name: prometheus-egress-kube-apiserver namespace: monitoring 
spec: policyTypes: diff --git a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl index 1821377de..75fed7196 100644 --- a/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl +++ b/templates/distribution/manifests/opa/policies/gatekeeper/controller-manager.yaml.tpl @@ -5,7 +5,7 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: controllermanager-egress-kubeapiserver + name: controller-manager-egress-kube-apiserver namespace: gatekeeper-system labels: cluster.kfd.sighup.io/module: opa diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl index cc046bd3e..01e8f0f43 100644 --- a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -115,7 +115,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: tempocomponents-egress-memcached + name: tempo-components-egress-memcached namespace: tracing labels: cluster.kfd.sighup.io/module: tracing @@ -187,7 +187,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: tempocomponents-egress-https + name: tempo-components-egress-https namespace: tracing labels: cluster.kfd.sighup.io/module: tracing From a75416d74666830a417e1a9f1c5e9da49f9717c0 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 19 Nov 2024 16:49:27 +0100 Subject: [PATCH 083/160] docs(network-policies): add READMEs and diagrams --- docs/network-policies/README.md | 24 ++++++++ docs/network-policies/modules/auth/README.md | 26 +++++++++ docs/network-policies/modules/auth/sso.md | 53 ++++++++++++++++++ .../modules/ingress/README.md | 34 ++++++++++++ docs/network-policies/modules/ingress/dual.md | 33 +++++++++++ .../modules/ingress/single.md | 33 +++++++++++ 
.../modules/logging/README.md | 55 +++++++++++++++++++ docs/network-policies/modules/logging/loki.md | 52 ++++++++++++++++++ .../modules/logging/opensearch.md | 48 ++++++++++++++++ .../modules/monitoring/README.md | 53 ++++++++++++++++++ .../modules/monitoring/mimir.md | 53 ++++++++++++++++++ .../modules/monitoring/prometheus.md | 48 ++++++++++++++++ docs/network-policies/modules/opa/README.md | 36 ++++++++++++ .../modules/opa/gatekeeper.md | 26 +++++++++ docs/network-policies/modules/opa/kyverno.md | 27 +++++++++ .../modules/tracing/README.md | 32 +++++++++++ .../network-policies/modules/tracing/tempo.md | 42 ++++++++++++++ docs/network-policies/overview.md | 36 ++++++++++++ 18 files changed, 711 insertions(+) create mode 100644 docs/network-policies/README.md create mode 100644 docs/network-policies/modules/auth/README.md create mode 100644 docs/network-policies/modules/auth/sso.md create mode 100644 docs/network-policies/modules/ingress/README.md create mode 100644 docs/network-policies/modules/ingress/dual.md create mode 100644 docs/network-policies/modules/ingress/single.md create mode 100644 docs/network-policies/modules/logging/README.md create mode 100644 docs/network-policies/modules/logging/loki.md create mode 100644 docs/network-policies/modules/logging/opensearch.md create mode 100644 docs/network-policies/modules/monitoring/README.md create mode 100644 docs/network-policies/modules/monitoring/mimir.md create mode 100644 docs/network-policies/modules/monitoring/prometheus.md create mode 100644 docs/network-policies/modules/opa/README.md create mode 100644 docs/network-policies/modules/opa/gatekeeper.md create mode 100644 docs/network-policies/modules/opa/kyverno.md create mode 100644 docs/network-policies/modules/tracing/README.md create mode 100644 docs/network-policies/modules/tracing/tempo.md create mode 100644 docs/network-policies/overview.md diff --git a/docs/network-policies/README.md b/docs/network-policies/README.md new file mode 100644 index 
000000000..ea69f0f40 --- /dev/null +++ b/docs/network-policies/README.md @@ -0,0 +1,24 @@ +# Network Policies Documentation + +This documentation describes all Network Policies of the KFD components for the OnPremises schema. + +## Modules +- [Auth](modules/auth/README.md) - Pomerium SSO +- [Ingress](modules/ingress/README.md) - Nginx (single/dual) + Cert-manager +- [Logging](modules/logging/README.md) - OpenSearch/Loki +- [Monitoring](modules/monitoring/README.md) - Prometheus/Mimir +- [Policy](modules/opa/README.md) - Gatekeeper/Kyverno +- [Tracing](modules/tracing/README.md) - Tempo + +## Common Patterns +All namespaces include: +- Default deny-all policy +- DNS access to kube-dns +- Prometheus metrics collection +- Kubernetes API server access where needed + +## High Level Overview +- [Overview](overview.md) + +## Instructions +Generate the new Network Policies diagrams with `make generate-np-diagrams`. \ No newline at end of file diff --git a/docs/network-policies/modules/auth/README.md b/docs/network-policies/modules/auth/README.md new file mode 100644 index 000000000..90a2f5d10 --- /dev/null +++ b/docs/network-policies/modules/auth/README.md @@ -0,0 +1,26 @@ +# Auth Module Network Policies + +## Components +- Pomerium + +## Namespaces +- pomerium + +## Network Policies List +- deny-all +- all-egress-kube-dns +- pomerium-ingress-nginx +- pomerium-egress-https +- pomerium-egress-grafana +- pomerium-egress-prometheus +- pomerium-egress-alert-manager +- pomerium-egress-forecastle +- pomerium-egress-gpm +- pomerium-egress-hubble-ui +- pomerium-egress-opensearch-dashboard +- pomerium-egress-minio-logging +- pomerium-egress-minio-tracing +- pomerium-ingress-prometheus-metrics + +## Configurations +- [SSO with Pomerium](sso.md) diff --git a/docs/network-policies/modules/auth/sso.md b/docs/network-policies/modules/auth/sso.md new file mode 100644 index 000000000..5d6b816f1 --- /dev/null +++ b/docs/network-policies/modules/auth/sso.md @@ -0,0 +1,53 @@ +# SSO with
Pomerium + +```mermaid +graph TD + %% Namespaces + subgraph ingress-nginx + nginx[Nginx Controller] + end + + subgraph pomerium + pom[Pomerium
app: pomerium] + acme[ACME HTTP Solver
app: cert-manager] + end + + subgraph monitoring + graf[Grafana] + prom[Prometheus] + am[Alertmanager] + minio_monitoring[MinIO] + end + + subgraph logging + osd[OpenSearch Dashboards] + minio_logging[MinIO] + end + + subgraph tracing + minio_tracing[MinIO] + end + + subgraph gatekeeper-system + gpm[Gatekeeper Policy Manager] + end + + %% External and K8s Core Components + dns[Kube DNS] + ext[External] + + %% Edges + pom -->|"53/UDP"| dns + nginx -->|"8080/TCP"| pom + nginx -->|"8089/TCP"| acme + prom -->|"9090/TCP metrics"| pom + pom -->|"443/TCP"| ext + pom -->|"3000/TCP"| graf + pom -->|"9090/TCP"| prom + pom -->|"9093/TCP"| am + pom -->|"5601/TCP"| osd + pom -->|"9001/TCP"| minio_logging + pom -->|"9001/TCP"| minio_tracing + pom -->|"9001/TCP"| minio_monitoring + pom -->|"8080/TCP"| gpm +``` \ No newline at end of file diff --git a/docs/network-policies/modules/ingress/README.md b/docs/network-policies/modules/ingress/README.md new file mode 100644 index 000000000..e0c2844bd --- /dev/null +++ b/docs/network-policies/modules/ingress/README.md @@ -0,0 +1,34 @@ +# Ingress Module Network Policies + +## Components +- Nginx Ingress Controller (single/dual mode) +- Cert-manager +- Forecastle + +## Namespaces +- ingress-nginx +- cert-manager + +## Network Policies List + +### cert-manager namespace: +- deny-all +- all-egress-kube-dns +- cert-manager-egress-kube-apiserver +- cert-manager-webhook-ingress-kube-apiserver +- cert-manager-egress-https +- cert-manager-ingress-prometheus-metrics +- acme-http-solver-ingress-lets-encrypt + +### ingress-nginx namespace: +- deny-all +- all-egress-kube-dns +- forecastle-ingress-nginx +- forecastle-egress-kube-apiserver +- nginx-egress-all +- all-ingress-nginx +- nginx-ingress-prometheus-metrics + +## Configurations +- [Single Nginx](single.md) +- [Dual Nginx](dual.md) diff --git a/docs/network-policies/modules/ingress/dual.md b/docs/network-policies/modules/ingress/dual.md new file mode 100644 index 000000000..6b988d41d --- /dev/null
+++ b/docs/network-policies/modules/ingress/dual.md @@ -0,0 +1,33 @@ +# Dual Nginx Configuration + +```mermaid +graph TD + %% Namespaces + subgraph ingress-nginx + nginx[Nginx Controller
app: ingress] + fc[Forecastle
app: forecastle] + end + + subgraph cert-manager + cm[Cert Manager
app: cert-manager] + cmw[Cert Manager Webhook] + end + + %% External and K8s Core Components + dns[Kube DNS] + api[Kubernetes API] + prom[Prometheus] + ext[External ACME / Internet] + + %% Edges + nginx & cm -->|"53/UDP"| dns + cm -->|"6443/TCP"| api + fc -->|"6443/TCP"| api + api -->|"10250/TCP"| cmw + prom -->|"10254/TCP"| nginx + prom -->|"9402/TCP"| cm + cm -->|"443,80/TCP"| ext + all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx + nginx -->|"egress: all"| all + nginx -->|"3000/TCP"| fc +``` \ No newline at end of file diff --git a/docs/network-policies/modules/ingress/single.md b/docs/network-policies/modules/ingress/single.md new file mode 100644 index 000000000..1fb318341 --- /dev/null +++ b/docs/network-policies/modules/ingress/single.md @@ -0,0 +1,33 @@ +# Single Nginx Configuration + +```mermaid +graph TD + %% Namespaces + subgraph ingress-nginx + nginx[Nginx Controller
app: ingress-nginx] + fc[Forecastle
app: forecastle] + end + + subgraph cert-manager + cm[Cert Manager
app: cert-manager] + cmw[Cert Manager Webhook] + end + + %% External and K8s Core Components + dns[Kube DNS] + api[Kubernetes API] + prom[Prometheus] + ext[External / ACME] + + %% Edges + nginx & cm -->|"53/UDP"| dns + cm -->|"6443/TCP"| api + fc -->|"6443/TCP"| api + api -->|"10250/TCP"| cmw + prom -->|"10254/TCP"| nginx + prom -->|"9402/TCP"| cm + cm -->|"443,80/TCP"| ext + all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx + nginx -->|"egress: all"| all + nginx -->|"3000/TCP"| fc +``` \ No newline at end of file diff --git a/docs/network-policies/modules/logging/README.md b/docs/network-policies/modules/logging/README.md new file mode 100644 index 000000000..b9bed7296 --- /dev/null +++ b/docs/network-policies/modules/logging/README.md @@ -0,0 +1,55 @@ +# Logging Module Network Policies + +## Components +- OpenSearch Stack +- Loki Stack + +## Namespaces +- logging + +## Network Policies List + +### Common Policies +- deny-all +- all-egress-kube-dns +- event-tailer-egress-kube-apiserver +- fluentbit-egress-fluentd +- fluentbit-egress-kube-apiserver +- fluentbit-ingress-prometheus-metrics +- logging-operator-egress-kube-apiserver + +### OpenSearch Stack +- fluentd-ingress-fluentbit +- fluentd-ingress-prometheus-metrics +- fluentd-egress-minio +- fluentd-egress-opensearch +- opensearch-discovery +- opensearch-ingress-dashboards +- opensearch-ingress-fluentd +- opensearch-ingress-prometheus-metrics +- opensearch-ingress-jobs +- opensearch-dashboards-egress-opensearch +- opensearch-dashboards-ingress-nginx +- opensearch-dashboards-ingress-jobs +- jobs-egress-opensearch + +### Loki Stack +- fluentd-egress-loki +- loki-distributed-ingress-fluentd +- loki-distributed-ingress-grafana +- loki-distributed-ingress-prometheus-metrics +- loki-distributed-discovery +- loki-distributed-egress-minio + +### MinIO +- minio-ingress-namespace +- minio-buckets-setup-egress-kube-apiserver +- minio-buckets-setup-egress-minio +- minio-ingress-prometheus-metrics +- minio-ingress-nginx +- 
minio-egress-https + +## Configurations +- [OpenSearch Stack](opensearch.md) +- [Loki Stack](loki.md) + diff --git a/docs/network-policies/modules/logging/loki.md b/docs/network-policies/modules/logging/loki.md new file mode 100644 index 000000000..f7f80c12e --- /dev/null +++ b/docs/network-policies/modules/logging/loki.md @@ -0,0 +1,52 @@ +# Loki Stack Configuration + +```mermaid +graph TD + %% Namespaces + subgraph logging + fb[Fluentbit
app.kubernetes.io/name: fluentbit] + fd[Fluentd
app.kubernetes.io/name: fluentd] + loki_gateway[Loki Gateway
app.kubernetes.io/component: gateway] + loki_compactor[Loki Compactor
app.kubernetes.io/component: compactor] + loki_distributor[Loki Distributor
app.kubernetes.io/component: distributor] + loki_ingester[Loki Ingester
app.kubernetes.io/component: ingester] + loki_querier[Loki Querier
app.kubernetes.io/component: querier] + loki_query_frontend[Loki Query Frontend
app.kubernetes.io/component: query-frontend] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-logging-buckets-setup] + end + + subgraph monitoring + prom[Prometheus] + graf[Grafana] + end + + pom[Pomerium] + + %% External and K8s Core Components + api[Kubernetes API] + ext[External] + dns[Kube DNS] + + %% Edges + logging -->|"53/UDP"| dns + bucket -->|"6443/TCP"| api + fb -->|"24240/TCP"| fd + fd -->|"8080/TCP"| loki_gateway + prom -->|"3100/TCP"| loki_gateway + graf -->|"8080/TCP"| loki_gateway + prom -->|"2020/TCP"| fb + fb -->|"6443/TCP"| api + loki_query_frontend -->|"loki-discovery
9095,7946,3100/TCP"| loki_distributor + loki_distributor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester + loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester + loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_query_frontend + loki_compactor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester + loki_compactor -->|"9000/TCP"| minio + loki_ingester -->|"9000/TCP"| minio + loki_querier -->|"9000/TCP"| minio + bucket -->|"9000/TCP"| minio + minio -->|"443/TCP"| ext + pom -->|"9001/TCP"| minio + minio -->|"9000/TCP"| logging +``` \ No newline at end of file diff --git a/docs/network-policies/modules/logging/opensearch.md b/docs/network-policies/modules/logging/opensearch.md new file mode 100644 index 000000000..bd7f3d9c4 --- /dev/null +++ b/docs/network-policies/modules/logging/opensearch.md @@ -0,0 +1,48 @@ +# Opensearch Stack Configuration + +```mermaid +graph TD + %% Namespace + subgraph logging + fb[Fluentbit
app.kubernetes.io/name: fluentbit] + fd[Fluentd
app.kubernetes.io/name: fluentd] + os[OpenSearch
app.kubernetes.io/name: opensearch] + osd[OpenSearch Dashboards
app: opensearch-dashboards] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-logging-buckets-setup] + op[Logging Operator
app.kubernetes.io/name: logging-operator] + et[Event Tailer
app.kubernetes.io/name: event-tailer] + job[OpenSearch Jobs] + end + + %% External and K8s Core Components + api[Kubernetes API] + ext[External] + prom[Prometheus] + pom[Pomerium] + nginx[Nginx] + dns[Kube DNS] + + %% Edges + logging --->|"53/UDP,TCP"| dns + fb -->|"6443/TCP"| api + et -->|"6443/TCP"| api + op -->|"6443/TCP"| api + bucket -->|"6443/TCP"| api + fb -->|"24240/TCP"| fd + fd -->|"9200/TCP"| os + osd -->|"9200/TCP"| os + pom -->|"5601/TCP"| osd + job -->|"5601/TCP"| osd + job -->|"9200/TCP"| os + prom -->|"2020/TCP"| fb + prom -->|"24231/TCP"| fd + prom -->|"9108/TCP"| os + prom -->|"9000/TCP"| minio + bucket -->|"9000/TCP"| minio + minio -->|"443/TCP"| ext + pom -->|"9001/TCP"| minio + logging -->|"9000/TCP"| minio + nginx -->|"9001/TCP"| minio + nginx -->|"5601/TCP"| osd +``` \ No newline at end of file diff --git a/docs/network-policies/modules/monitoring/README.md b/docs/network-policies/modules/monitoring/README.md new file mode 100644 index 000000000..e9905ca0e --- /dev/null +++ b/docs/network-policies/modules/monitoring/README.md @@ -0,0 +1,53 @@ +# Monitoring Module Network Policies + +## Components +- Prometheus Stack +- Mimir Stack + +## Namespaces +- monitoring + +## Network Policies List + +### Common Policies +- deny-all +- all-egress-kube-dns +- alertmanager-main +- alertmanager-ingress-nginx +- blackbox-exporter +- grafana +- grafana-egress-tempo-gateway +- grafana-ingress-nginx +- kube-state-metrics +- node-exporter +- prometheus-ingress-nginx +- prometheus-adapter +- prometheus-ingress-prometheus-adapter +- prometheus-operator +- x509-exporter-egress-kube-apiserver +- x509-exporter-ingress-prometheus-metrics +- kube-state-metrics +- minio-ingress-namespace +- minio-buckets-setup-egress-kube-apiserver +- minio-buckets-setup-egress-minio +- minio-ingress-prometheus-metrics +- minio-monitoring-egress-all + +### Prometheus specific +- prometheus-k8s +- prometheus-egress-minio +- prometheus-egress-kube-apiserver + +### Mimir specific +- 
mimir-distributed-discovery +- mimir-distributed-ingress-prometheus-metrics +- mimir-gateway-ingress-grafana +- mimir-querier-egress-https +- mimir-ingester-egress-https +- mimir-distributed-egress-minio (when using MinIO) +- mimir-distributed-egress-all (when not using MinIO) + +## Configurations +- [Prometheus Stack](prometheus.md) +- [Mimir Stack](mimir.md) + diff --git a/docs/network-policies/modules/monitoring/mimir.md b/docs/network-policies/modules/monitoring/mimir.md new file mode 100644 index 000000000..c118a0e27 --- /dev/null +++ b/docs/network-policies/modules/monitoring/mimir.md @@ -0,0 +1,53 @@ +# Mimir Stack Configuration + +```mermaid +graph TD + %% Namespace + subgraph monitoring + gateway[Gateway
component: gateway] + distributor[Distributor
component: distributor] + ingester[Ingester
component: ingester] + querier[Querier
component: querier] + qfront[Query Frontend
component: query-frontend] + qsched[Query Scheduler
component: query-scheduler] + store[Store Gateway
component: store-gateway] + compactor[Compactor
component: compactor] + grafana[Grafana
app.kubernetes.io/name: grafana] + prom[Prometheus
app.kubernetes.io/name: prometheus] + am[Alertmanager
app.kubernetes.io/component: alert-router] + bb[Blackbox Exporter
app.kubernetes.io/name: blackbox-exporter] + ksm[Kube State Metrics
app.kubernetes.io/name: kube-state-metrics] + ne[Node Exporter
app.kubernetes.io/name: node-exporter] + x509[x509 Exporter
app: x509-certificate-exporter] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-monitoring-buckets-setup] + end + + %% External and K8s Core Components + api[Kubernetes API] + dns[Kube DNS] + + %% Edges + monitoring -->|"53/UDP,TCP"| dns + bucket -->|"9000/TCP"| minio + qfront -->|"mimir-discovery
9095,7946,8080/TCP"| qsched + qfront -->|"mimir-discovery
9095,7946,8080/TCP"| querier + gateway -->|"mimir-discovery
9095,7946,8080/TCP"| distributor + distributor -->|"mimir-discovery
9095,7946,8080/TCP"| ingester + qsched -->|"mimir-discovery
9095,7946,8080/TCP"| querier + querier -->|"mimir-discovery
9095,7946,8080/TCP"| store + querier -->|"mimir-discovery
9095,7946,8080/TCP"| ingester + store -->|"mimir-discovery
9095,7946,8080/TCP"| compactor + compactor -->|"mimir-discovery
9095,7946,8080/TCP"| store + ingester & store & compactor -->|"9000/TCP"| minio + grafana -->|"8080/TCP"| gateway + prom -->|"8080/TCP"| distributor + prom -->|"9115,19115/TCP"| bb + prom -->|"8443,9443/TCP"| ksm + prom -->|"9100/TCP"| ne + prom -->|"9793/TCP"| x509 + prom -->|"9093,8080/TCP"| am + pom[Pomerium] -->|"3000/TCP"| grafana + pom -->|"9093/TCP"| am + x509 -->|"6443/TCP"| api +``` \ No newline at end of file diff --git a/docs/network-policies/modules/monitoring/prometheus.md b/docs/network-policies/modules/monitoring/prometheus.md new file mode 100644 index 000000000..b9bb8da70 --- /dev/null +++ b/docs/network-policies/modules/monitoring/prometheus.md @@ -0,0 +1,48 @@ +# Prometheus Stack Configuration + +```mermaid +graph TD + %% Namespace + subgraph monitoring + prom[Prometheus
app.kubernetes.io/name: prometheus] + grafana[Grafana
app.kubernetes.io/name: grafana] + am[Alertmanager
app.kubernetes.io/name: alertmanager] + bb[Blackbox Exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/component: exporter] + ksm[Kube State Metrics
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/component: exporter] + ne[Node Exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/component: exporter] + pa[Prometheus Adapter
app.kubernetes.io/name: prometheus-adapter
app.kubernetes.io/component: metrics-adapter] + po[Prometheus Operator
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/component: controller] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-monitoring-buckets-setup] + x509[x509 Exporter
app: x509-certificate-exporter] + end + + subgraph tracing + tempo[Tempo Gateway
app.kubernetes.io/name: tempo
app.kubernetes.io/component: gateway] + end + + %% External and K8s Core Components + api[Kubernetes API] + dns[Kube DNS] + pom["Pomerium"] + + %% Edges + monitoring -->|"53/UDP,TCP"| dns + bucket -->|"9000/TCP"| minio + prom -->|"6443,8405/TCP"| api + prom -->|"9000/TCP"| minio + prom -->|"9115,19115/TCP"| bb + prom -->|"8443,9443/TCP"| ksm + prom -->|"9100/TCP"| ne + prom -->|"8443/TCP"| po + prom -->|"9793/TCP"| x509 + prom & am & bb & grafana & ksm & ne & pa & po -->|"egress: all"| all[All Namespaces] + pa -->|"9090/TCP"| prom + grafana -->|"9090/TCP"| prom + prom -->|"9093,8080/TCP"| am + pom -->|"9093/TCP"| am + prom -->|"3000/TCP"| grafana + grafana -->|"8080/TCP"| tempo + pom -->|"3000/TCP"| grafana + x509 -->|"6443/TCP"| api +``` diff --git a/docs/network-policies/modules/opa/README.md b/docs/network-policies/modules/opa/README.md new file mode 100644 index 000000000..f12b57f5b --- /dev/null +++ b/docs/network-policies/modules/opa/README.md @@ -0,0 +1,36 @@ +# Policy Module Network Policies + +## Components +- Gatekeeper + Gatekeeper Policy Manager +- Kyverno + +## Namespaces +- gatekeeper-system (when using Gatekeeper) +- kyverno (when using Kyverno) + +## Network Policies List + +### Gatekeeper +- deny-all +- all-egress-dns +- audit-controller-egress-kube-apiserver +- controller-manager-egress-kube-apiserver +- controller-manager-ingress-kube-apiserver +- gpm-egress-kube-apiserver +- gpm-ingress-pomerium +- gatekeeper-ingress-prometheus-metrics + +### Kyverno +- deny-all +- all-egress-dns +- kyverno-admission-egress-kube-apiserver +- kyverno-admission-ingress-nodes +- kyverno-background-egress-kube-apiserver +- kyverno-reports-egress-kube-apiserver +- kyverno-cleanup-egress-kube-apiserver +- kyverno-cleanup-reports-egress-kube-apiserver + +## Configurations +- [Gatekeeper](gatekeeper.md) +- [Kyverno](kyverno.md) + diff --git a/docs/network-policies/modules/opa/gatekeeper.md b/docs/network-policies/modules/opa/gatekeeper.md new file mode 100644 index 
000000000..d3d049f7d --- /dev/null +++ b/docs/network-policies/modules/opa/gatekeeper.md @@ -0,0 +1,26 @@ +# Gatekeeper Configuration + +```mermaid +graph TD + %% Namespace + subgraph gatekeeper-system + audit[Audit Controller
control-plane: audit-controller] + cm[Controller Manager
control-plane: controller-manager] + gpm[Policy Manager
app: gatekeeper-policy-manager] + end + + %% External and K8s Core Components + api[Kubernetes API] + dns[Kube DNS] + prom[Prometheus] + pom[Pomerium] + + %% Edges + audit & cm -->|"53/UDP"| dns + audit -->|"6443/TCP"| api + cm -->|"6443/TCP"| api + gpm -->|"6443/TCP"| api + pom -->|"8080/TCP"| gpm + prom -->|"8888/TCP"| audit & cm + api -->|"8443,443/TCP"| cm +``` diff --git a/docs/network-policies/modules/opa/kyverno.md b/docs/network-policies/modules/opa/kyverno.md new file mode 100644 index 000000000..602c40dc8 --- /dev/null +++ b/docs/network-policies/modules/opa/kyverno.md @@ -0,0 +1,27 @@ +# Kyverno Configuration + +```mermaid +graph TD + %% Namespace + subgraph kyverno + admission[Admission Controller
component: admission-controller] + background[Background Controller
component: background-controller] + reports[Reports Controller
component: reports-controller] + cleanup[Cleanup Controller
component: cleanup-controller] + end + + %% External and K8s Core Components + dns[Kube DNS] + api[Kubernetes API] + + %% Edges + admission -->|"53/UDP"| dns + background -->|"53/UDP"| dns + reports -->|"53/UDP"| dns + cleanup -->|"53/UDP"| dns + admission -->|"6443/TCP"| api + background -->|"6443/TCP"| api + reports -->|"6443/TCP"| api + cleanup -->|"6443/TCP"| api + all[All Namespaces] -->|"9443/TCP"| admission +``` diff --git a/docs/network-policies/modules/tracing/README.md b/docs/network-policies/modules/tracing/README.md new file mode 100644 index 000000000..fe70c4e12 --- /dev/null +++ b/docs/network-policies/modules/tracing/README.md @@ -0,0 +1,32 @@ +# Tracing Module Network Policies + +## Components +- Tempo + +## Namespaces +- tracing + +## Network Policies List +- deny-all +- all-egress-kube-dns +- tempo-distributed-discovery +- tempo-distributed-ingress-prometheus-metrics +- tempo-gateway-ingress-grafana +- all-egress-tempo-distributor +- tempo-distributor-ingress-traces +- tempo-components-egress-memcached +- memcached-ingress-querier +- tempo-components-egress-https +- tempo-distributed-egress-minio (when using MinIO) +- tempo-distributed-egress-all (when not using MinIO) + +### MinIO (when enabled) +- minio-ingress-namespace +- minio-buckets-setup-egress-kube-apiserver +- minio-buckets-setup-egress-minio +- minio-ingress-prometheus-metrics +- minio-ingress-pomerium +- minio-egress-https + +## Configurations +- [Tempo](tempo.md) diff --git a/docs/network-policies/modules/tracing/tempo.md b/docs/network-policies/modules/tracing/tempo.md new file mode 100644 index 000000000..0ca46d1ba --- /dev/null +++ b/docs/network-policies/modules/tracing/tempo.md @@ -0,0 +1,42 @@ +# Tempo Configuration + +```mermaid +graph TD + %% Namespaces + subgraph tracing + gateway[Tempo Gateway
component: gateway] + dist[Tempo Distributor
component: distributor] + query[Tempo Querier
component: querier] + mem[Memcached
component: memcached] + minio[MinIO
app: minio] + bucket[MinIO Bucket Setup
app: minio-tracing-buckets-setup] + end + + subgraph monitoring + graf[Grafana] + prom[Prometheus] + end + + subgraph pomerium + pom[Pomerium] + end + + allns[All Namespaces] + + %% External and K8s Core Components + dns[Kube DNS] + ext[External] + + %% Edges + gateway & dist & query -->|"53/UDP"| dns + gateway -->|"9095,7946,3100/TCP"| dist & query + dist -->|"9095,7946,3100/TCP"| query + query -->|"11211/TCP"| mem + allns -->|"4317/TCP"| dist + graf -->|"8080/TCP"| gateway + prom -->|"3100/TCP"| gateway & dist & query + pom -->|"9001/TCP"| minio + query -->|"9000/TCP"| minio + minio -->|"443/TCP"| ext + bucket -->|"9000/TCP"| minio +``` \ No newline at end of file diff --git a/docs/network-policies/overview.md b/docs/network-policies/overview.md new file mode 100644 index 000000000..93fbc5033 --- /dev/null +++ b/docs/network-policies/overview.md @@ -0,0 +1,36 @@ +# KFD Network Policies Overview + +```mermaid +graph TD + subgraph kfd[KFD Core Modules] + ingress[Ingress
Nginx + Cert-manager] + auth[Auth
Pomerium] + mon[Monitoring
Prometheus/Mimir] + log[Logging
Opensearch/Loki] + tracing[Tracing
Tempo] + opa[OPA
Gatekeeper/Kyverno] + end + + %% K8s Core Components + dns[KubeDNS] + api[Kubernetes API] + ext[External] + + %% Edges + kfd --->|"53/UDP"| dns + kfd -->|"6443/TCP"| api + ingress -->|"8080/TCP"| auth + auth -->|"auth proxy"| mon & log & tracing & opa + auth -->|"443/TCP"| ext + mon -->|"metrics"| all + mon -->|"metrics"| auth + mon -->|"metrics"| ingress + mon -->|"metrics"| log + mon -->|"metrics"| tracing + mon -->|"metrics"| opa + all[All Namespaces] -->|"logs"| log + all -->|"traces"| tracing + + + +``` \ No newline at end of file From aedf02d4a94be6385ae9e844d97f9f194154acdb Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 19 Nov 2024 16:51:16 +0100 Subject: [PATCH 084/160] chore(makefile): add network policy target --- Makefile | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Makefile b/Makefile index 51ad403b4..7fd3a4464 100644 --- a/Makefile +++ b/Makefile @@ -94,6 +94,20 @@ generate-docs: @md-gen gen --input schemas/public/kfddistribution-kfd-v1alpha2.json --output docs/schemas/kfddistribution-kfd-v1alpha2.md --overwrite --banner banners/kfddistribution.md @md-gen gen --input schemas/public/ekscluster-kfd-v1alpha2.json --output docs/schemas/ekscluster-kfd-v1alpha2.md --overwrite --banner banners/ekscluster.md +.PHONY: generate-np-diagrams +generate-np-diagrams: + docker run --rm -v $(PWD)/docs/network-policies:/workdir minlag/mermaid-cli:latest -i "/workdir/overview.md" -o "/workdir/overview.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/auth:/workdir minlag/mermaid-cli:latest -i "/workdir/sso.md" -o "/workdir/sso.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/ingress:/workdir minlag/mermaid-cli:latest -i "/workdir/single.md" -o "/workdir/single.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/ingress:/workdir minlag/mermaid-cli:latest -i "/workdir/dual.md" -o "/workdir/dual.png" -w 2048 -H 1536 -b white + 
docker run --rm -v $(PWD)/docs/network-policies/modules/logging:/workdir minlag/mermaid-cli:latest -i "/workdir/loki.md" -o "/workdir/loki.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/logging:/workdir minlag/mermaid-cli:latest -i "/workdir/opensearch.md" -o "/workdir/opensearch.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/monitoring:/workdir minlag/mermaid-cli:latest -i "/workdir/mimir.md" -o "/workdir/mimir.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/monitoring:/workdir minlag/mermaid-cli:latest -i "/workdir/prometheus.md" -o "/workdir/prometheus.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/opa:/workdir minlag/mermaid-cli:latest -i "/workdir/gatekeeper.md" -o "/workdir/gatekeeper.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/opa:/workdir minlag/mermaid-cli:latest -i "/workdir/kyverno.md" -o "/workdir/kyverno.png" -w 2048 -H 1536 -b white + docker run --rm -v $(PWD)/docs/network-policies/modules/tracing:/workdir minlag/mermaid-cli:latest -i "/workdir/tempo.md" -o "/workdir/tempo.png" -w 2048 -H 1536 -b white + .PHONY: dump-private-schema dump-private-schema: @cat schemas/public/ekscluster-kfd-v1alpha2.json | \ From cb326bf0563ae3ece8df0a46457fadf7a208f327 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 19 Nov 2024 16:59:40 +0100 Subject: [PATCH 085/160] fix(docs): remove trailing punctuation --- docs/network-policies/modules/ingress/README.md | 4 ++-- docs/network-policies/modules/monitoring/README.md | 2 ++ docs/network-policies/modules/tracing/README.md | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/network-policies/modules/ingress/README.md b/docs/network-policies/modules/ingress/README.md index e0c2844bd..b609e816a 100644 --- a/docs/network-policies/modules/ingress/README.md +++ 
b/docs/network-policies/modules/ingress/README.md @@ -11,7 +11,7 @@ ## Network Policies List -### cert-manager namespace: +### Cert-manager - deny-all - all-egress-kube-dns - cert-manager-egress-kube-apiserver @@ -20,7 +20,7 @@ - cert-manager-ingress-prometheus-metrics - acme-http-solver-ingress-lets-encrypt -### ingress-nginx namespace: +### Ingress-nginx - deny-all - all-egress-kube-dns - forecastle-ingress-nginx diff --git a/docs/network-policies/modules/monitoring/README.md b/docs/network-policies/modules/monitoring/README.md index e9905ca0e..acf6e419e 100644 --- a/docs/network-policies/modules/monitoring/README.md +++ b/docs/network-policies/modules/monitoring/README.md @@ -27,6 +27,8 @@ - x509-exporter-egress-kube-apiserver - x509-exporter-ingress-prometheus-metrics - kube-state-metrics + +### MinIO - minio-ingress-namespace - minio-buckets-setup-egress-kube-apiserver - minio-buckets-setup-egress-minio diff --git a/docs/network-policies/modules/tracing/README.md b/docs/network-policies/modules/tracing/README.md index fe70c4e12..4db75de3b 100644 --- a/docs/network-policies/modules/tracing/README.md +++ b/docs/network-policies/modules/tracing/README.md @@ -20,7 +20,7 @@ - tempo-distributed-egress-minio (when using MinIO) - tempo-distributed-egress-all (when not using MinIO) -### MinIO (when enabled) +### MinIO - minio-ingress-namespace - minio-buckets-setup-egress-kube-apiserver - minio-buckets-setup-egress-minio From 97f0e95fb6b2ee6decf8cbcbac900c06a9fbe0d9 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Tue, 19 Nov 2024 17:04:02 +0100 Subject: [PATCH 086/160] feat: update networking to v2.0.0-rc.1 (even if this is a major, no changes are needed) --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 48f9bd0a5..c2b5162c8 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -11,7 +11,7 @@ modules: logging: v3.4.1 monitoring: v3.2.0 opa: v1.13.0 - networking: v1.17.0 + networking: v2.0.0-rc.1 tracing: v1.1.0 kubernetes: 
eks: From 3f62595b4a21d17e9628566ae6282a695aa29ae5 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Tue, 19 Nov 2024 17:30:35 +0100 Subject: [PATCH 087/160] feat(ingress): bump to v3.0.1-rc.1 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 870ab5912..4ead9f815 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -7,7 +7,7 @@ modules: auth: v0.3.0 aws: v4.2.1 dr: v2.3.0 - ingress: v3.0.1-rc.0 + ingress: v3.0.1-rc.1 logging: v3.4.1 monitoring: v3.2.0 opa: v1.12.0 From fc66d60a66c8f4d58be239ce7ccd63058cc2fb97 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 19 Nov 2024 17:56:43 +0100 Subject: [PATCH 088/160] fix(docs): update main readme --- docs/network-policies/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/network-policies/README.md b/docs/network-policies/README.md index ea69f0f40..5511fbb1e 100644 --- a/docs/network-policies/README.md +++ b/docs/network-policies/README.md @@ -7,7 +7,7 @@ This documentation describes all Network Policies of the KFD components for the - [Ingress](modules/ingress/README.md) - Nginx (single/dual) + Cert-manager - [Logging](modules/logging/README.md) - OpenSearch/Loki - [Monitoring](modules/monitoring/README.md) - Prometheus/Mimir -- [Policy](modules/policy/README.md) - Gatekeeper/Kyverno +- [OPA](modules/opa/README.md) - Gatekeeper/Kyverno - [Tracing](modules/tracing/README.md) - Tempo ## Common Patterns From 11771be999df159a7c4e8148b60a9cecd69c90e3 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 19 Nov 2024 17:57:42 +0100 Subject: [PATCH 089/160] fix(docs): update opa readme --- docs/network-policies/modules/opa/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/network-policies/modules/opa/README.md b/docs/network-policies/modules/opa/README.md index f12b57f5b..a43db5d2a 100644 --- a/docs/network-policies/modules/opa/README.md +++ b/docs/network-policies/modules/opa/README.md @@ -1,4 +1,4 @@ -# Policy 
Module Network Policies +# OPA Module Network Policies ## Components - Gatekeeper + Gatekeeper Policy Manager From 07bead5084ea2614ee4f3e0401b20486d6d4edc4 Mon Sep 17 00:00:00 2001 From: Alessio Dionisi Date: Wed, 20 Nov 2024 10:07:26 +0100 Subject: [PATCH 090/160] ci: disable lint-go --- .drone.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.drone.yml b/.drone.yml index baf4cb444..04c9692c4 100644 --- a/.drone.yml +++ b/.drone.yml @@ -49,14 +49,14 @@ steps: - license-check - schema-check - - name: lint-go - image: quay.io/sighup/golang:1.23.3 - pull: always - commands: - - make lint-go - depends_on: - - license-check - - schema-check + # - name: lint-go + # image: quay.io/sighup/golang:1.23.3 + # pull: always + # commands: + # - make lint-go + # depends_on: + # - license-check + # - schema-check - name: test-schema # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 From 7521db0223e6d4bf90d92e228d1a5d955e4efd97 Mon Sep 17 00:00:00 2001 From: Luca De Carne Date: Wed, 20 Nov 2024 10:14:07 +0100 Subject: [PATCH 091/160] feat(logging): bump to v4.0.0-rc.0 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 203461a07..78fbef0b0 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -8,7 +8,7 @@ modules: aws: v4.2.1 dr: v2.3.0 ingress: v2.3.3 - logging: v3.5.0-rc.1 + logging: v4.0.0-rc.0 monitoring: v3.2.0 opa: v1.12.0 networking: v1.17.0 From 59610d921aceb030b74f257e6ebf921cf1cb48cd Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 10:33:20 +0100 Subject: [PATCH 092/160] chore: re-run make generate-go-models and generate-docs --- pkg/apis/ekscluster/v1alpha2/public/schema.go | 4184 +++++++++++++++++ .../kfddistribution/v1alpha2/public/schema.go | 2875 +++++++++++ pkg/apis/onpremises/v1alpha2/public/schema.go | 3756 +++++++++++++++ schemas/private/ekscluster-kfd-v1alpha2.json | 2925 ++++++++++++ 4 files changed, 13740 
insertions(+) diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index e69de29bb..b2edf0592 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -0,0 +1,4184 @@ +// Code generated by github.com/sighupio/go-jsonschema, DO NOT EDIT. + +package public + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" +) + +// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +type EksclusterKfdV1Alpha2 struct { + // ApiVersion corresponds to the JSON schema field "apiVersion". + ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` + + // Kind corresponds to the JSON schema field "kind". + Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` + + // Metadata corresponds to the JSON schema field "metadata". + Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` + + // Spec corresponds to the JSON schema field "spec". + Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` +} + +type EksclusterKfdV1Alpha2Kind string + +const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" + +type Metadata struct { + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". + Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // DistributionVersion corresponds to the JSON schema field "distributionVersion". + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Infrastructure corresponds to the JSON schema field "infrastructure". 
+ Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` + + // Kubernetes corresponds to the JSON schema field "kubernetes". + Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` + + // Region corresponds to the JSON schema field "region". + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This map defines which will be the common tags that will be added to all the + // resources created on AWS. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` +} + +type SpecDistribution struct { + // Common corresponds to the JSON schema field "common". + Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + + // CustomPatches corresponds to the JSON schema field "customPatches". + CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + + // Modules corresponds to the JSON schema field "modules". + Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +} + +type SpecDistributionCommon struct { + // The node selector to use to place the pods for all the KFD modules + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Provider corresponds to the JSON schema field "provider". 
+ Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` + + // URL of the registry where to pull images from for the Distribution phase. + // (Default is registry.sighup.io/fury). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for these plugins too. + Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // The relative path to the vendor directory, does not need to be changed + RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` + + // The tolerations that will be added to the pods for all the KFD modules + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionCommonProvider struct { + // The type of the provider, must be EKS if specified + Type string `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource + +type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { + // The behavior of the configmap + Behavior *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the configmap + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the configmap + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the configmap + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the configmap + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the 
configmap + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". + Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` +} + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +const ( + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" +) + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { + // The annotations of the configmap + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the configmap will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the configmap + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} + +// Each entry should follow the format of Kustomize's images patch +type SpecDistributionCustomPatchesImages []map[string]interface{} + +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". + Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + +type SpecDistributionCustomPatchesPatchOptions struct { + // If true, the kind change will be allowed + AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` + + // If true, the name change will be allowed + AllowNameChange *bool `json:"allowNameChange,omitempty" yaml:"allowNameChange,omitempty" mapstructure:"allowNameChange,omitempty"` +} + +type SpecDistributionCustomPatchesPatchTarget struct { + // The annotation selector of the target + AnnotationSelector *string `json:"annotationSelector,omitempty" yaml:"annotationSelector,omitempty" mapstructure:"annotationSelector,omitempty"` + + // The group of the target + Group *string `json:"group,omitempty" yaml:"group,omitempty" mapstructure:"group,omitempty"` + + // The kind of the target + Kind *string `json:"kind,omitempty" yaml:"kind,omitempty" mapstructure:"kind,omitempty"` + + // The label selector of the target + LabelSelector *string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty" mapstructure:"labelSelector,omitempty"` + + // The name of the target + Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` + + // The namespace of the target + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // The version of the target + Version *string 
`json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch + +// Each entry should be either a relative file path or an inline content resolving +// to a partial or complete resource definition +type SpecDistributionCustomPatchesPatchesStrategicMerge []string + +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + +type SpecDistributionCustomPatchesSecretGeneratorResource struct { + // The behavior of the secret + Behavior *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the secret + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the secret + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the secret + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the secret + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the secret + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesSecretGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The type of the secret + Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} + +type SpecDistributionCustompatches struct { + // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". + ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` + + // Images corresponds to the JSON schema field "images". 
+ Images SpecDistributionCustomPatchesImages `json:"images,omitempty" yaml:"images,omitempty" mapstructure:"images,omitempty"` + + // Patches corresponds to the JSON schema field "patches". + Patches SpecDistributionCustomPatchesPatches `json:"patches,omitempty" yaml:"patches,omitempty" mapstructure:"patches,omitempty"` + + // PatchesStrategicMerge corresponds to the JSON schema field + // "patchesStrategicMerge". + PatchesStrategicMerge SpecDistributionCustomPatchesPatchesStrategicMerge `json:"patchesStrategicMerge,omitempty" yaml:"patchesStrategicMerge,omitempty" mapstructure:"patchesStrategicMerge,omitempty"` + + // SecretGenerator corresponds to the JSON schema field "secretGenerator". + SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` +} + +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` + + // Aws corresponds to the JSON schema field "aws". + Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` + + // Dr corresponds to the JSON schema field "dr". + Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` + + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". 
+ Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". + Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` +} + +type SpecDistributionModulesAuth struct { + // The base domain for the auth module + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` + + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +} + +type SpecDistributionModulesAuthDex struct { + // The additional static clients for dex + AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` + + // The connectors for dex + Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` + + // Expiry corresponds to the JSON schema field "expiry". 
+ Expiry *SpecDistributionModulesAuthDexExpiry `json:"expiry,omitempty" yaml:"expiry,omitempty" mapstructure:"expiry,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). + IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` + + // Dex signing key expiration time duration (default 6h). + SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` +} + +type SpecDistributionModulesAuthOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the auth module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the auth module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAuthOverridesIngress struct { + // The host of the ingress + Host string `json:"host" yaml:"host" mapstructure:"host"` + + // The ingress class of the ingress + IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` +} + +type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress + +type SpecDistributionModulesAuthPomerium interface{} + +// override default routes for KFD components +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // 
GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". + IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` + + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". + LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` + + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". 
+ MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". + MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` + + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". 
+ TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` +} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} + +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. +type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. + // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` + + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. 
When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` + + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. It's critical that secret keys are random, + // and stored safely. + // + // To generate a key, run the following command: `head -c32 /dev/urandom | base64` + SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` + + // Signing Key is the base64 representation of one or more PEM-encoded private + // keys used to sign a user's attestation JWT, which can be consumed by upstream + // applications to pass along identifying user information like username, id, and + // groups. + // + // To generate a P-256 (ES256) signing key: + // + // ```bash + // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem + // # careful! this will output your private key in terminal + // cat ec_private.pem | base64 + // ``` + SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` +} + +// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. +type SpecDistributionModulesAuthPomerium_2 struct { + // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". + DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // DEPRECATED: Use defaultRoutesPolicy and/or routes + Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` + + // Additional routes configuration for Pomerium. Follows Pomerium's route format: + // https://www.pomerium.com/docs/reference/routes + Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + + // Secrets corresponds to the JSON schema field "secrets". + Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +} + +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". + BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + + // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for the basic auth + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The username for the basic auth + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecDistributionModulesAuthProviderType string + +const ( + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) + +type SpecDistributionModulesAws struct { + // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". 
+ ClusterAutoscaler *SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler,omitempty" yaml:"clusterAutoscaler,omitempty" mapstructure:"clusterAutoscaler,omitempty"` + + // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". + EbsCsiDriver *SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver,omitempty" yaml:"ebsCsiDriver,omitempty" mapstructure:"ebsCsiDriver,omitempty"` + + // EbsSnapshotController corresponds to the JSON schema field + // "ebsSnapshotController". + EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"` + + // LoadBalancerController corresponds to the JSON schema field + // "loadBalancerController". + LoadBalancerController *SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController,omitempty" yaml:"loadBalancerController,omitempty" mapstructure:"loadBalancerController,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAwsClusterAutoscaler struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAwsEbsCsiDriver struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAwsEbsSnapshotController struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAwsLoadBalancerController struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of the DR, must be ***none*** or ***eks*** + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` + + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` +} + +type SpecDistributionModulesDrType string + +const ( + SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" + SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" +) + +type SpecDistributionModulesDrVelero struct { + // Eks corresponds to the JSON schema field "eks". + Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Configuration for Velero's backup schedules. 
+ Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +} + +type SpecDistributionModulesDrVeleroEks struct { + // The name of the velero bucket + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // The region where the velero bucket is located + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +} + +// Configuration for Velero's backup schedules. +type SpecDistributionModulesDrVeleroSchedules struct { + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` + + // Whether to install or not the default `manifests` and `full` backups schedules. + // Default is `true`. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's full backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's full backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). 
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { + // The cron expression for the `manifests` backup schedule (default `*/15 * * * + // *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. 
+ Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +type SpecDistributionModulesIngress struct { + // the base domain used for all the KFD ingresses, if in the nginx dual + // configuration, it should be the same as the + // .spec.distribution.modules.ingress.dns.private.name zone + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` + + // CertManager corresponds to the JSON schema field "certManager". + CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` + + // Dns corresponds to the JSON schema field "dns". + Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` + + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // Configurations for the nginx ingress controller module + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressCertManager struct { + // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". + ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email of the cluster issuer + Email string `json:"email" yaml:"email" mapstructure:"email"` + + // The name of the cluster issuer + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The custom solvers configurations + Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` + + // The type of the cluster issuer, must be ***dns01*** or ***http01*** + Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionModulesIngressCertManagerClusterIssuerType string + +const ( + SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" + SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" +) + +type SpecDistributionModulesIngressDNS struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Private corresponds to the JSON schema field "private". + Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` + + // Public corresponds to the JSON schema field "public". 
+ Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` +} + +type SpecDistributionModulesIngressDNSPrivate struct { + // If true, the private hosted zone will be created + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the private hosted zone + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecDistributionModulesIngressDNSPublic struct { + // If true, the public hosted zone will be created + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the public hosted zone + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecDistributionModulesIngressForecastle struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` + + // The type of the nginx ingress controller, must be ***none***, ***single*** or + // ***dual*** + Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificate, must be ***none***, ***certManager*** or + // ***secret*** + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + + // Secret corresponds to the JSON schema field "secret". 
+ Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +} + +type SpecDistributionModulesIngressNginxTLSProvider string + +const ( + SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" + SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" +) + +type SpecDistributionModulesIngressNginxTLSSecret struct { + // Ca corresponds to the JSON schema field "ca". + Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` + + // The certificate file content or you can use the file notation to get the + // content from a file + Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` +} + +type SpecDistributionModulesIngressNginxType string + +const ( + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" + SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" + SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" +) + +type SpecDistributionModulesIngressOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the ingress module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the ingress module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesIngressOverridesIngresses struct { + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +} + +type SpecDistributionModulesLogging struct { + // Cerebro corresponds to the JSON schema field "cerebro". + Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` + + // CustomOutputs corresponds to the JSON schema field "customOutputs". + CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` + + // Loki corresponds to the JSON schema field "loki". + Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Opensearch corresponds to the JSON schema field "opensearch". + Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // selects the logging stack. Choosing none will disable the centralized logging. + // Choosing opensearch will deploy and configure the Logging Operator and an + // OpenSearch cluster (can be single or triple for HA) where the logs will be + // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearch + // for storage. Choosing customOutputs the Logging Operator will be deployed and + // installed but with no local storage, you will have to create the needed Outputs + // and ClusterOutputs to ship the logs to your desired storage. + Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesLoggingCerebro struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// when using the customOutputs logging type, you need to manually specify the spec +// of the several Output and ClusterOutputs that the Logging Operator expects to +// forward the logs collected by the pre-defined flows. +type SpecDistributionModulesLoggingCustomOutputs struct { + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. 
It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Events string `json:"events" yaml:"events" mapstructure:"events"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` + + // This value defines where the output from Flow will be sent. 
Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` +} + +type SpecDistributionModulesLoggingLoki struct { + // Backend corresponds to the JSON schema field "backend". + Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. 
+ TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +} + +type SpecDistributionModulesLoggingLokiBackend string + +const ( + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" + SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" +) + +type SpecDistributionModulesLoggingLokiExternalEndpoint struct { + // The access key id of the loki external endpoint + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the loki external endpoint + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the loki external endpoint + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the loki external endpoint will be insecure + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the loki external endpoint + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +type SpecDistributionModulesLoggingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". 
+ RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each minio disk, 6 disks total + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password of the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username of the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesLoggingOpensearch struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The storage size for the opensearch pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + + // The type of the opensearch, must be ***single*** or ***triple*** + Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesLoggingOpensearchType string + +const ( + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" +) + +type SpecDistributionModulesLoggingOperator struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesLoggingType string + +const ( + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" +) + +// configuration for the Monitoring module components +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". 
+ Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". + PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be ***none***, ***prometheus***, + // ***prometheusAgent*** or ***mimir***. + // + // - `none`: will disable the whole monitoring stack. + // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus + // in Agent mode (no alerting, no queries, no storage), and all the exporters + // needed to get metrics for the status of the cluster and the workloads. Useful + // when having a centralized (remote) Prometheus where to ship the metrics and not + // storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, and in addition + // Grafana Mimir that allows for longer retention of metrics and the usage of + // Object Storage. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + + // X509Exporter corresponds to the JSON schema field "x509Exporter". 
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` +} + +type SpecDistributionModulesMonitoringAlertManager struct { + // The webhook url to send deadman switch monitoring, for example to use with + // healthchecks.io + DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` + + // If true, the default rules will be installed + InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` + + // The slack webhook url to send alerts + SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +} + +type SpecDistributionModulesMonitoringBlackboxExporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringGrafana struct { + // Setting this to true will deploy an additional `grafana-basic-auth` ingress + // protected with Grafana's basic auth instead of SSO. Its intended use is as a + // temporary ingress for when there are problems with the SSO login flow. + // + // Notice that by default anonymous access is enabled. + BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's + // role. 
Example: + // + // ```yaml + // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || + // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && + // 'Viewer' + // ``` + // + // More details in [Grafana's + // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). + UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +} + +type SpecDistributionModulesMonitoringKubeStateMetrics struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringMimir struct { + // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the mimir pods + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesMonitoringMimirBackend string + +const ( + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" +) + +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key id of the external mimir backend + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external mimir backend + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the external mimir backend + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the external mimir backend will not use tls + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the external mimir backend + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". 
+ RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The storage size for the minio pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the k8s Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the k8s Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the k8s Prometheus instance. 
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringType string + +const ( + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworking struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` +} + +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of security to use, either ***none***, ***gatekeeper*** or + // ***kyverno*** + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. 
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // The enforcement action to use for the gatekeeper module + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // If true, the default policies will be installed + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // If true, the default policies will be installed + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the kyverno module + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +} + +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +type SpecDistributionModulesPolicyType string + +const ( + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" +) + +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either ***none*** or ***tempo*** + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The storage size for the minio pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesTracingTempo struct { + // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the tempo pods + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +const ( + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" +) + +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key id of the external tempo backend + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external tempo backend + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the external tempo backend + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the external tempo backend will not use tls + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the external tempo backend + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +type SpecDistributionModulesTracingType string + +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +type SpecInfrastructure struct { + // This key defines the VPC that will be created in AWS + Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` + + // This section 
defines the creation of VPN bastions + Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` +} + +type SpecInfrastructureVpc struct { + // Network corresponds to the JSON schema field "network". + Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` +} + +type SpecInfrastructureVpcNetwork struct { + // This is the CIDR of the VPC that will be created + Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` + + // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". + SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` +} + +type SpecInfrastructureVpcNetworkSubnetsCidrs struct { + // These are the CIDRs for the private subnets, where the nodes, the pods, and the + // private load balancers will be created + Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` + + // These are the CIDRs for the public subnets, where the public load balancers and + // the VPN servers will be created + Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` +} + +type SpecInfrastructureVpn struct { + // This value defines the prefix that will be used to create the bucket name where + // the VPN servers will store the states + BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` + + // The dhParamsBits size used for the creation of the .pem file that will be used + // in the dh openvpn server.conf file + DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` + + // The size of the disk in GB + DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` + + // Overrides the default IAM user name for the VPN + IamUserNameOverride *TypesAwsIamRoleName 
`json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` + + // The size of the AWS EC2 instance + InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"` + + // The number of instances to create, 0 to skip the creation + Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` + + // The username of the account to create in the bastion's operating system + OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` + + // The port used by the OpenVPN server + Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". + Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The VPC ID where the VPN servers will be created, required only if + // .spec.infrastructure.vpc is omitted + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // The CIDR that will be used to assign IP addresses to the VPN clients when + // connected + VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` +} + +type SpecInfrastructureVpnSsh struct { + // The CIDR enabled in the security group that can access the bastions in SSH + AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` + + // The github user name list that will be used to get the ssh public key that will + // be added as authorized key to the operatorName user + GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` + + // This value defines the public keys that will be added to the bastion's + // operating system NOTES: Not yet implemented + PublicKeys 
[]interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` +} + +type SpecKubernetes struct { + // ApiServer corresponds to the JSON schema field "apiServer". + ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` + + // AwsAuth corresponds to the JSON schema field "awsAuth". + AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` + + // Overrides the default IAM role name prefix for the EKS cluster + ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` + + // Optional Kubernetes Cluster log retention in days. Defaults to 90 days. + LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` + + // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. + LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` + + // This key contains the ssh public key that can connect to the nodes via SSH + // using the ec2-user user + NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` + + // NodePools corresponds to the JSON schema field "nodePools". + NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` + + // Either `launch_configurations`, `launch_templates` or `both`. For new clusters + // use `launch_templates`, for existing cluster you'll need to migrate from + // `launch_configurations` to `launch_templates` using `both` as interim. 
+ NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` + + // This value defines the CIDR that will be used to assign IP addresses to the + // services + ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` + + // This value defines the subnet IDs where the EKS cluster will be created, + // required only if .spec.infrastructure.vpc is omitted + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // This value defines the VPC ID where the EKS cluster will be created, required + // only if .spec.infrastructure.vpc is omitted + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // Overrides the default IAM role name prefix for the EKS workers + WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` +} + +type SpecKubernetesAPIServer struct { + // This value defines if the API server will be accessible only from the private + // subnets + PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` + + // This value defines the CIDRs that will be allowed to access the API server from + // the private subnets + PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` + + // This value defines if the API server will be accessible from the public subnets + PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` + + // This value defines the CIDRs that will be allowed to access the API server from + // the public subnets + PublicAccessCidrs []TypesCidr 
`json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` +} + +type SpecKubernetesAwsAuth struct { + // This optional array defines additional AWS accounts that will be added to the + // aws-auth configmap + AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` + + // This optional array defines additional IAM roles that will be added to the + // aws-auth configmap + Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` + + // This optional array defines additional IAM users that will be added to the + // aws-auth configmap + Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` +} + +type SpecKubernetesAwsAuthRole struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Rolearn corresponds to the JSON schema field "rolearn". + Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` + + // Username corresponds to the JSON schema field "username". + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecKubernetesAwsAuthUser struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Userarn corresponds to the JSON schema field "userarn". + Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` + + // Username corresponds to the JSON schema field "username". 
+ Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecKubernetesLogsTypesElem string + +const ( + SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" + SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" + SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" + SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" + SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" +) + +type SpecKubernetesNodePool struct { + // AdditionalFirewallRules corresponds to the JSON schema field + // "additionalFirewallRules". + AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` + + // Ami corresponds to the JSON schema field "ami". + Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` + + // This optional array defines additional target groups to attach to the instances + // in the node pool + AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` + + // The container runtime to use for the nodes + ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` + + // Instance corresponds to the JSON schema field "instance". 
+ Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` + + // Kubernetes labels that will be added to the nodes + Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` + + // The name of the node pool + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Size corresponds to the JSON schema field "size". + Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` + + // This value defines the subnet IDs where the nodes will be created + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // AWS tags that will be added to the ASG and EC2 instances + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Kubernetes taints that will be added to the nodes + Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` + + // Type corresponds to the JSON schema field "type". + Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { + // CidrBlocks corresponds to the JSON schema field "cidrBlocks". + CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` + + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Tags corresponds to the JSON schema field "tags". 
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Type corresponds to the JSON schema field "type". + Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string + +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { + // From corresponds to the JSON schema field "from". + From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` + + // To corresponds to the JSON schema field "to". + To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` +} + +type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { + // The name of the FW rule + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". 
+ Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the FW rule + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // If true, the source will be the security group itself + Self bool `json:"self" yaml:"self" mapstructure:"self"` + + // The tags of the FW rule + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the FW rule can be ingress or egress + Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string + +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { + // The name of the FW rule + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". 
+ Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the FW rule + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // The source security group ID + SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` + + // The tags of the FW rule + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the FW rule can be ingress or egress + Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + } + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLogging(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + } + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingLoki(plain) + return nil +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + } + *j = SpecDistributionModulesMonitoringMimirBackend(v) + return nil +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The cpu limit for the opensearch pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPublic(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPrivate(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + } + type Plain SpecDistributionModulesDrVelero + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVelero(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVeleroEks(plain) + return nil +} + +const ( + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil +} + +const TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil +} + +const ( + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil +} + +const TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + +const ( + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + } + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) + return nil +} + +const ( + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" +) + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} + +const ( + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" +) + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} + +const ( + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" +) + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + } + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesTracing(plain) + return nil +} + +const TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") + } + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") + } + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) + return nil +} + +const TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistribution(plain) + return nil +} + +type TypesCidr string + +const TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + return nil +} + +const TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + } + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetwork(plain) + return nil +} + +const TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") + } + type Plain SpecInfrastructureVpc + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpc(plain) + return nil +} + +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + +const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + } + type Plain SpecInfrastructureVpnSsh + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) + return nil +} + +type TypesAwsVpcId string + +const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) + return nil +} + +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + } + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAPIServer(plain) + return nil +} + +type TypesAwsArn string + +type TypesAwsRegion string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAwsAuthRole(plain) + return nil +} + +type TypesAwsS3BucketName string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAwsAuthUser(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + +type TypesAwsIamRoleNamePrefix string + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + } + *j = SpecKubernetesLogsTypesElem(v) + return nil +} + +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the dr module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesAwsIamRoleName string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + return nil +} + +type TypesAwsIpProtocol string + +type TypesAwsTags map[string]string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil +} + +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + return nil +} + +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" +) + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + return nil +} + +type SpecKubernetesNodePoolAdditionalFirewallRules struct { + // The CIDR blocks for the FW rule. At the moment the first item of the list will + // be used, others will be ignored. + CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` + + // Self corresponds to the JSON schema field "self". + Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` + + // SourceSecurityGroupId corresponds to the JSON schema field + // "sourceSecurityGroupId". 
+ SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + return nil +} + +type SpecKubernetesNodePoolAmi struct { + // The AMI ID to use for the nodes + Id string `json:"id" yaml:"id" mapstructure:"id"` + + // The owner of the AMI + Owner string `json:"owner" yaml:"owner" mapstructure:"owner"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["id"]; !ok || v == nil { + return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + } + if v, ok := raw["owner"]; !ok || v == nil { + return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + } + type Plain SpecKubernetesNodePoolAmi + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolAmi(plain) + return nil +} + +type SpecKubernetesNodePoolContainerRuntime string + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + } + *j = SpecKubernetesNodePoolContainerRuntime(v) + return nil +} + +const ( + SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" + SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" +) + +type SpecKubernetesNodePoolInstanceVolumeType string + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + } + *j = SpecKubernetesNodePoolInstanceVolumeType(v) + return nil +} + +const ( + SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" + SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" + SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" + SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" +) + +type SpecKubernetesNodePoolInstance struct { + // MaxPods corresponds to the JSON schema field "maxPods". + MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` + + // If true, the nodes will be created as spot instances + Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` + + // The instance type to use for the nodes + Type string `json:"type" yaml:"type" mapstructure:"type"` + + // The size of the disk in GB + VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` + + // VolumeType corresponds to the JSON schema field "volumeType". + VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + } + type Plain SpecKubernetesNodePoolInstance + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolInstance(plain) + return nil +} + +type TypesKubeLabels_1 map[string]string + +type SpecKubernetesNodePoolSize struct { + // The maximum number of nodes in the node pool + Max int `json:"max" yaml:"max" mapstructure:"max"` + + // The minimum number of nodes in the node pool + Min int `json:"min" yaml:"min" mapstructure:"min"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + } + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + } + type Plain SpecKubernetesNodePoolSize + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePoolSize(plain) + return nil +} + +type TypesAwsSubnetId string + +type TypesKubeTaints []string + +type SpecKubernetesNodePoolType string + +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + } + *j = SpecKubernetesNodePoolType(v) + return nil +} + +const ( + SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" + SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") + } + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePool(plain) + return nil +} + +type SpecKubernetesNodePoolsLaunchKind string + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + } + *j = SpecKubernetesNodePoolsLaunchKind(v) + return nil +} + +const ( + SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" + SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" + SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" +) + +type TypesKubeLabels map[string]string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") + } + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + } + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") + } + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + } + type Plain SpecKubernetes + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetes(plain) + return nil +} + +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecPluginsHelmReleasesElemSetElem(plain) + return nil +} + +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". 
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". 
+ Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +} + +type TypesAwsS3KeyPrefix string + +type SpecToolsConfigurationTerraformStateS3 struct { + // This value defines which bucket will be used to store all the states + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // This value defines which folder will be used to store all the states inside the + // bucket + KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` + + // This value defines in which region the bucket is located + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This value defines if the region of the bucket should be validated or not by + // Terraform, useful when using a bucket in a recently added region + SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") + } + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + } + type Plain SpecToolsConfigurationTerraformStateS3 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraformStateS3(plain) + return nil +} + +type SpecToolsConfigurationTerraformState struct { + // S3 corresponds to the JSON schema field "s3". + S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + } + type Plain SpecToolsConfigurationTerraformState + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraformState(plain) + return nil +} + +type SpecToolsConfigurationTerraform struct { + // State corresponds to the JSON schema field "state". + State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + } + type Plain SpecToolsConfigurationTerraform + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraform(plain) + return nil +} + +type SpecToolsConfiguration struct { + // Terraform corresponds to the JSON schema field "terraform". + Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + } + type Plain SpecToolsConfiguration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfiguration(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *Spec) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") + } + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) + return nil +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil +} + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". 
+ Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} + +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + return nil +} + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + } + *j = TypesKubeTolerationEffect_1(v) + return nil +} + +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + } + *j = TypesKubeTolerationOperator_1(v) + return nil +} + +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration_1(plain) + return nil +} + +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + } + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomerium_2(plain) + return nil +} + +type TypesAwsSshPubKey string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesUri string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} + +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_EksclusterKfdV1Alpha2Kind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_EksclusterKfdV1Alpha2Kind, v) + } + *j = EksclusterKfdV1Alpha2Kind(v) + return nil +} + +type TypesKubeNodeSelector map[string]string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *Metadata) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") + } + type Plain Metadata + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *EksclusterKfdV1Alpha2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["apiVersion"]; !ok || v == nil { + return fmt.Errorf("field apiVersion in EksclusterKfdV1Alpha2: required") + } + if v, ok := raw["kind"]; !ok || v == nil { + return fmt.Errorf("field kind in EksclusterKfdV1Alpha2: required") + } + if v, ok := raw["metadata"]; !ok || v == nil { + return fmt.Errorf("field metadata in EksclusterKfdV1Alpha2: required") + } + if v, ok := raw["spec"]; !ok || v == nil { + return fmt.Errorf("field spec in EksclusterKfdV1Alpha2: required") + } + type Plain EksclusterKfdV1Alpha2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = EksclusterKfdV1Alpha2(plain) + return nil +} diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index e69de29bb..e1a3f89cc 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -0,0 +1,2875 @@ +// Code generated by github.com/sighupio/go-jsonschema, DO NOT EDIT. 
+ +package public + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" +) + +type KfddistributionKfdV1Alpha2 struct { + // ApiVersion corresponds to the JSON schema field "apiVersion". + ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` + + // Kind corresponds to the JSON schema field "kind". + Kind KfddistributionKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` + + // Metadata corresponds to the JSON schema field "metadata". + Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` + + // Spec corresponds to the JSON schema field "spec". + Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` +} + +type KfddistributionKfdV1Alpha2Kind string + +const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" + +type Metadata struct { + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". + Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // DistributionVersion corresponds to the JSON schema field "distributionVersion". + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` +} + +type SpecDistribution struct { + // Common corresponds to the JSON schema field "common". + Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + + // CustomPatches corresponds to the JSON schema field "customPatches". 
+ CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + + // The kubeconfig file path + Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` + + // Modules corresponds to the JSON schema field "modules". + Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +} + +type SpecDistributionCommon struct { + // The node selector to use to place the pods for all the KFD modules + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` + + // URL of the registry where to pull images from for the Distribution phase. + // (Default is registry.sighup.io/fury). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. 
+ Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // The relative path to the vendor directory, does not need to be changed + RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` + + // The tolerations that will be added to the pods for all the KFD modules + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionCommonProvider struct { + // The type of the provider + Type string `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource + +type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { + // The behavior of the configmap + Behavior *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the configmap + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the configmap + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the configmap + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the configmap + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the configmap + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` +} + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +const ( + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" +) + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { + // The annotations of the configmap + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the configmap will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the configmap + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} + +// Each entry should follow the format of Kustomize's images patch +type SpecDistributionCustomPatchesImages []map[string]interface{} + +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". + Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + +type SpecDistributionCustomPatchesPatchOptions struct { + // If true, the kind change will be allowed + AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` + + // If true, the name change will be allowed + AllowNameChange *bool `json:"allowNameChange,omitempty" yaml:"allowNameChange,omitempty" mapstructure:"allowNameChange,omitempty"` +} + +type SpecDistributionCustomPatchesPatchTarget struct { + // The annotation selector of the target + AnnotationSelector *string `json:"annotationSelector,omitempty" yaml:"annotationSelector,omitempty" mapstructure:"annotationSelector,omitempty"` + + // The group of the target + Group *string `json:"group,omitempty" yaml:"group,omitempty" mapstructure:"group,omitempty"` + + // The kind of the target + Kind *string `json:"kind,omitempty" yaml:"kind,omitempty" mapstructure:"kind,omitempty"` + + // The label selector of the target + LabelSelector *string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty" mapstructure:"labelSelector,omitempty"` + + // The name of the target + Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` + + // The namespace of the target + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // The version of the target + Version *string 
`json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch + +// Each entry should be either a relative file path or an inline content resolving +// to a partial or complete resource definition +type SpecDistributionCustomPatchesPatchesStrategicMerge []string + +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + +type SpecDistributionCustomPatchesSecretGeneratorResource struct { + // The behavior of the secret + Behavior *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the secret + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the secret + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the secret + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the secret + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the secret + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesSecretGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The type of the secret + Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} + +type SpecDistributionCustompatches struct { + // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". + ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` + + // Images corresponds to the JSON schema field "images". 
+ Images SpecDistributionCustomPatchesImages `json:"images,omitempty" yaml:"images,omitempty" mapstructure:"images,omitempty"` + + // Patches corresponds to the JSON schema field "patches". + Patches SpecDistributionCustomPatchesPatches `json:"patches,omitempty" yaml:"patches,omitempty" mapstructure:"patches,omitempty"` + + // PatchesStrategicMerge corresponds to the JSON schema field + // "patchesStrategicMerge". + PatchesStrategicMerge SpecDistributionCustomPatchesPatchesStrategicMerge `json:"patchesStrategicMerge,omitempty" yaml:"patchesStrategicMerge,omitempty" mapstructure:"patchesStrategicMerge,omitempty"` + + // SecretGenerator corresponds to the JSON schema field "secretGenerator". + SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` +} + +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` + + // Dr corresponds to the JSON schema field "dr". + Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` + + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". 
+ Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". + Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` +} + +type SpecDistributionModulesAuth struct { + // The base domain for the auth module + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` + + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +} + +type SpecDistributionModulesAuthDex struct { + // The additional static clients for dex + AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` + + // The connectors for dex + Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` + + // Expiry corresponds to the JSON schema field "expiry". 
+ Expiry *SpecDistributionModulesAuthDexExpiry `json:"expiry,omitempty" yaml:"expiry,omitempty" mapstructure:"expiry,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). + IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` + + // Dex signing key expiration time duration (default 6h). + SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` +} + +type SpecDistributionModulesAuthOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the auth module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the auth module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAuthOverridesIngress struct { + // The host of the ingress + Host string `json:"host" yaml:"host" mapstructure:"host"` + + // The ingress class of the ingress + IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` +} + +type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress + +type SpecDistributionModulesAuthPomerium interface{} + +// override default routes for KFD components +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // 
GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". + IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` + + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". + LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` + + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". 
+ MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". + MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` + + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". 
+ TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` +} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} + +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. +type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. + // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` + + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. 
When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` + + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. It's critical that secret keys are random, + // and stored safely. + // + // To generate a key, run the following command: `head -c32 /dev/urandom | base64` + SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` + + // Signing Key is the base64 representation of one or more PEM-encoded private + // keys used to sign a user's attestation JWT, which can be consumed by upstream + // applications to pass along identifying user information like username, id, and + // groups. + // + // To generates an P-256 (ES256) signing key: + // + // ```bash + // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem + // # careful! this will output your private key in terminal + // cat ec_private.pem | base64 + // ``` + SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` +} + +// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. +type SpecDistributionModulesAuthPomerium_2 struct { + // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". + DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // DEPRECATED: Use defaultRoutesPolicy and/or routes + Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` + + // Additional routes configuration for Pomerium. Follows Pomerium's route format: + // https://www.pomerium.com/docs/reference/routes + Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + + // Secrets corresponds to the JSON schema field "secrets". + Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +} + +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". + BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + + // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for the basic auth + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The username for the basic auth + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecDistributionModulesAuthProviderType string + +const ( + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) + +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of the DR, must be ***none*** or ***on-premises*** + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` + + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` +} + +type SpecDistributionModulesDrType string + +const ( + SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" + SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" +) + +type SpecDistributionModulesDrVelero struct { + // The storage backend type for Velero. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesDrVeleroBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Velero's external storage backend. + ExternalEndpoint *SpecDistributionModulesDrVeleroExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Configuration for Velero's backup schedules. + Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` + + // Configuration for the additional snapshotController component installation. 
+ SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"` +} + +type SpecDistributionModulesDrVeleroBackend string + +const ( + SpecDistributionModulesDrVeleroBackendExternalEndpoint SpecDistributionModulesDrVeleroBackend = "externalEndpoint" + SpecDistributionModulesDrVeleroBackendMinio SpecDistributionModulesDrVeleroBackend = "minio" +) + +// Configuration for Velero's external storage backend. +type SpecDistributionModulesDrVeleroExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Velero's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for Velero's backup schedules. +type SpecDistributionModulesDrVeleroSchedules struct { + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` + + // Whether to install or not the default `manifests` and `full` backups schedules. + // Default is `true`. 
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. 
+type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { + // The cron expression for the `manifests` backup schedule (default `*/15 * * * + // *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for the additional snapshotController component installation. +type SpecDistributionModulesDrVeleroSnapshotController struct { + // Whether to install or not the snapshotController component in the cluster. + // Before enabling this field, check if your CSI driver does not have + // snapshotController built-in. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +type SpecDistributionModulesIngress struct { + // the base domain used for all the KFD ingresses, if in the nginx dual + // configuration, it should be the same as the + // .spec.distribution.modules.ingress.dns.private.name zone + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` + + // CertManager corresponds to the JSON schema field "certManager". + CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` + + // Forecastle corresponds to the JSON schema field "forecastle". 
+ Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // Configurations for the nginx ingress controller module + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressCertManager struct { + // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". + ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email of the cluster issuer + Email string `json:"email" yaml:"email" mapstructure:"email"` + + // The name of the cluster issuer + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The custom solvers configurations + Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` + + // The type of the cluster issuer, must be ***http01*** + Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionModulesIngressCertManagerClusterIssuerType string + +const SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" + +type SpecDistributionModulesIngressForecastle struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` + + // The type of the nginx ingress controller, must be ***none***, ***single*** or + // ***dual*** + Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificate, must be ***none***, ***certManager*** or + // ***secret*** + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + + // Secret corresponds to the JSON schema field "secret". + Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +} + +type SpecDistributionModulesIngressNginxTLSProvider string + +const ( + SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" + SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" +) + +type SpecDistributionModulesIngressNginxTLSSecret struct { + // Ca corresponds to the JSON schema field "ca". 
+ Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` + + // The certificate file content or you can use the file notation to get the + // content from a file + Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` +} + +type SpecDistributionModulesIngressNginxType string + +const ( + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" + SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" + SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" +) + +type SpecDistributionModulesIngressOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the ingress module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the ingress module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesIngressOverridesIngresses struct { + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +} + +type SpecDistributionModulesLogging struct { + // Cerebro corresponds to the JSON schema field "cerebro". + Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` + + // CustomOutputs corresponds to the JSON schema field "customOutputs". 
+ CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"`
+
+ // Loki corresponds to the JSON schema field "loki".
+ Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"`
+
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Opensearch corresponds to the JSON schema field "opensearch".
+ Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"`
+
+ // Operator corresponds to the JSON schema field "operator".
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // selects the logging stack. Choosing none will disable the centralized logging.
+ // Choosing opensearch will deploy and configure the Logging Operator and an
+ // OpenSearch cluster (can be single or triple for HA) where the logs will be
+ // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearch
+ // for storage. Choosing customOutputs the Logging Operator will be deployed and
+ // installed but with no local storage, you will have to create the needed Outputs
+ // and ClusterOutputs to ship the logs to your desired storage.
+ Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
+type SpecDistributionModulesLoggingCerebro struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// when using the customOutputs logging type, you need to manually specify the spec +// of the several Output and ClusterOutputs that the Logging Operator expects to +// forward the logs collected by the pre-defined flows. +type SpecDistributionModulesLoggingCustomOutputs struct { + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Events string `json:"events" yaml:"events" mapstructure:"events"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. 
+ IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` + + // This value defines where the output from Flow will be sent. Will be the `spec` + // section of the `Output` object. It must be a string (and not a YAML object) + // following the OutputSpec definition. Use the nullout output to discard the + // flow. + SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` +} + +type SpecDistributionModulesLoggingLoki struct { + // Backend corresponds to the JSON schema field "backend". + Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Resources corresponds to the JSON schema field "resources". 
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +} + +type SpecDistributionModulesLoggingLokiBackend string + +const ( + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" + SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" +) + +type SpecDistributionModulesLoggingLokiExternalEndpoint struct { + // The access key id of the loki external endpoint + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the loki external endpoint + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the loki external endpoint + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the loki external endpoint will be insecure + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the loki external endpoint + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` 
+} + +type SpecDistributionModulesLoggingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each minio disk, 6 disks total + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password of the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username of the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesLoggingOpensearch struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Resources corresponds to the JSON schema field "resources". 
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The storage size for the opensearch pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + + // The type of the opensearch, must be ***single*** or ***triple*** + Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesLoggingOpensearchType string + +const ( + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" +) + +type SpecDistributionModulesLoggingOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesLoggingType string + +const ( + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" +) + +// configuration for the Monitoring module components +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". 
+ BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". + PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be ***none***, ***prometheus***, + // ***prometheusAgent*** or ***mimir***. + // + // - `none`: will disable the whole monitoring stack. 
+ // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+ // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+ // the components of the cluster, Grafana and a series of dashboards to view the
+ // collected metrics, and more.
+ // - `prometheusAgent`: will install Prometheus operator, an instance of Prometheus
+ // in Agent mode (no alerting, no queries, no storage), and all the exporters
+ // needed to get metrics for the status of the cluster and the workloads. Useful
+ // when having a centralized (remote) Prometheus where to ship the metrics and not
+ // storing them locally in the cluster.
+ // - `mimir`: will install the same as the `prometheus` option, and in addition
+ // Grafana Mimir that allows for longer retention of metrics and the usage of
+ // Object Storage.
+ Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+ // X509Exporter corresponds to the JSON schema field "x509Exporter".
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringAlertManager struct {
+ // The webhook url to send deadman switch monitoring, for example to use with
+ // healthchecks.io
+ DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
+
+ // If true, the default rules will be installed
+ InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+ // The slack webhook url to send alerts
+ SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+ // Setting this to true will deploy an additional `grafana-basic-auth` ingress
+ // protected with Grafana's basic auth instead of SSO. Its intended use is as a
+ // temporary ingress for when there are problems with the SSO login flow.
+ //
+ // Notice that by default anonymous access is enabled.
+ BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+ // role.
Example: + // + // ```yaml + // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || + // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && + // 'Viewer' + // ``` + // + // More details in [Grafana's + // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). + UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +} + +type SpecDistributionModulesMonitoringKubeStateMetrics struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringMimir struct { + // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the mimir pods + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesMonitoringMimirBackend string + +const ( + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" +) + +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key id of the external mimir backend + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external mimir backend + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the external mimir backend + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the external mimir backend will not use tls + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the external mimir backend + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". 
+ RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The storage size for the minio pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the k8s Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the K8s Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the k8s Prometheus instance. 
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringType string + +const ( + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of networking to use, either ***none***, ***calico*** or ***cilium*** + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesNetworkingCilium struct { + // MaskSize corresponds to the JSON schema field "maskSize". + MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // PodCidr corresponds to the JSON schema field "podCidr". + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` +} + +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworkingType string + +const ( + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" +) + +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of security to use, either ***none***, ***gatekeeper*** or + // ***kyverno*** + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. 
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // The enforcement action to use for the gatekeeper module + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // If true, the default policies will be installed + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // If true, the default policies will be installed + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the kyverno module + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +} + +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +type SpecDistributionModulesPolicyType string + +const ( + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" +) + +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either ***none*** or ***tempo*** + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The storage size for the minio pods + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the minio root user + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the minio root user + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesTracingTempo struct { + // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the tempo pods + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +const SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["maskSize"]; !ok || v == nil { + return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") + } + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") + } + type Plain SpecDistributionModulesNetworkingCilium + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesNetworkingCilium(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + } + *j = SpecDistributionModulesMonitoringMimirBackend(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + } + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingLoki(plain) + return nil +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". 
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The cpu limit for the loki pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil +} + +type TypesCidr string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} + +type SpecDistributionModulesTracingType string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", + "calico", + "cilium", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + } + *j = SpecDistributionModulesNetworkingType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + } + *j = SpecDistributionModulesDrVeleroBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + } + type Plain SpecDistributionModulesNetworking + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesNetworking(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", +} + +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the security module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil +} + +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + } + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} + +const SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + } + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLogging(plain) + return nil +} + +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key id of the external tempo backend + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external tempo backend + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The endpoint of the external tempo backend + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, the external tempo backend will not use tls + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key of the external tempo backend + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} + +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + } + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesTracing(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") + } + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") + } + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) + return nil +} + +type TypesKubeLabels map[string]string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["kubeconfig"]; !ok || v == nil { + return fmt.Errorf("field kubeconfig in SpecDistribution: required") + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistribution(plain) + return nil +} + +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecPluginsHelmReleasesElemSetElem(plain) + return nil +} + +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". 
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Spec) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") + } + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + type Plain Spec + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) + return nil +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil +} + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} + +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + return nil +} + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + } + *j = TypesKubeTolerationEffect_1(v) + return nil +} + +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + } + *j = TypesKubeTolerationOperator_1(v) + return nil +} + +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration_1(plain) + return nil +} + +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + } + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomerium_2(plain) + return nil +} + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeTaints []string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + +type TypesUri string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} + +var enumValues_KfddistributionKfdV1Alpha2Kind = []interface{}{ + "KFDDistribution", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *KfddistributionKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_KfddistributionKfdV1Alpha2Kind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_KfddistributionKfdV1Alpha2Kind, v) + } + *j = KfddistributionKfdV1Alpha2Kind(v) + return nil +} + +type TypesKubeNodeSelector map[string]string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Metadata) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") + } + type Plain Metadata + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *KfddistributionKfdV1Alpha2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["apiVersion"]; !ok || v == nil { + return fmt.Errorf("field apiVersion in KfddistributionKfdV1Alpha2: required") + } + if v, ok := raw["kind"]; !ok || v == nil { + return fmt.Errorf("field kind in KfddistributionKfdV1Alpha2: required") + } + if v, ok := raw["metadata"]; !ok || v == nil { + return fmt.Errorf("field metadata in KfddistributionKfdV1Alpha2: required") + } + if v, ok := raw["spec"]; !ok || v == nil { + return fmt.Errorf("field spec in KfddistributionKfdV1Alpha2: required") + } + type Plain KfddistributionKfdV1Alpha2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = KfddistributionKfdV1Alpha2(plain) + return nil +} diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index e69de29bb..588056293 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -0,0 +1,3756 @@ +// Code generated by github.com/sighupio/go-jsonschema, DO NOT EDIT. + +package public + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/sighupio/go-jsonschema/pkg/types" +) + +type Metadata struct { + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type OnpremisesKfdV1Alpha2 struct { + // ApiVersion corresponds to the JSON schema field "apiVersion". + ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` + + // Kind corresponds to the JSON schema field "kind". + Kind OnpremisesKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` + + // Metadata corresponds to the JSON schema field "metadata". 
+ Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` + + // Spec corresponds to the JSON schema field "spec". + Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` +} + +type OnpremisesKfdV1Alpha2Kind string + +const OnpremisesKfdV1Alpha2KindOnPremises OnpremisesKfdV1Alpha2Kind = "OnPremises" + +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". + Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // v1.30.1. + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Kubernetes corresponds to the JSON schema field "kubernetes". + Kubernetes *SpecKubernetes `json:"kubernetes,omitempty" yaml:"kubernetes,omitempty" mapstructure:"kubernetes,omitempty"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` +} + +type SpecDistribution struct { + // Common corresponds to the JSON schema field "common". + Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + + // CustomPatches corresponds to the JSON schema field "customPatches". + CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + + // Modules corresponds to the JSON schema field "modules". + Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +} + +// Common configuration for all the distribution modules. +type SpecDistributionCommon struct { + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. 
Example: `node.kubernetes.io/role: infra` + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` + + // URL of the registry where to pull images from for the Distribution phase. + // (Default is `registry.sighup.io/fury`). + Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // The relative path to the vendor directory, does not need to be changed. + RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` + + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionCommonProvider struct { + // The provider type. Don't set. FOR INTERNAL USE ONLY. 
+ Type string `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource + +type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { + // The behavior of the configmap + Behavior *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the configmap + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the configmap + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the configmap + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the configmap + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the configmap + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` +} + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +const ( + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" +) + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { + // The annotations of the configmap + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the configmap will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the configmap + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} + +// Each entry should follow the format of Kustomize's images patch +type SpecDistributionCustomPatchesImages []map[string]interface{} + +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". + Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + +type SpecDistributionCustomPatchesPatchOptions struct { + // If true, the kind change will be allowed + AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` + + // If true, the name change will be allowed + AllowNameChange *bool `json:"allowNameChange,omitempty" yaml:"allowNameChange,omitempty" mapstructure:"allowNameChange,omitempty"` +} + +type SpecDistributionCustomPatchesPatchTarget struct { + // The annotation selector of the target + AnnotationSelector *string `json:"annotationSelector,omitempty" yaml:"annotationSelector,omitempty" mapstructure:"annotationSelector,omitempty"` + + // The group of the target + Group *string `json:"group,omitempty" yaml:"group,omitempty" mapstructure:"group,omitempty"` + + // The kind of the target + Kind *string `json:"kind,omitempty" yaml:"kind,omitempty" mapstructure:"kind,omitempty"` + + // The label selector of the target + LabelSelector *string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty" mapstructure:"labelSelector,omitempty"` + + // The name of the target + Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` + + // The namespace of the target + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // The version of the target + Version *string 
`json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch + +// Each entry should be either a relative file path or an inline content resolving +// to a partial or complete resource definition +type SpecDistributionCustomPatchesPatchesStrategicMerge []string + +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + +type SpecDistributionCustomPatchesSecretGeneratorResource struct { + // The behavior of the secret + Behavior *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior `json:"behavior,omitempty" yaml:"behavior,omitempty" mapstructure:"behavior,omitempty"` + + // The envs of the secret + Envs []string `json:"envs,omitempty" yaml:"envs,omitempty" mapstructure:"envs,omitempty"` + + // The files of the secret + Files []string `json:"files,omitempty" yaml:"files,omitempty" mapstructure:"files,omitempty"` + + // The literals of the secret + Literals []string `json:"literals,omitempty" yaml:"literals,omitempty" mapstructure:"literals,omitempty"` + + // The name of the secret + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the secret + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesSecretGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The type of the secret + Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} + +type SpecDistributionCustompatches struct { + // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". + ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` + + // Images corresponds to the JSON schema field "images". 
+ Images SpecDistributionCustomPatchesImages `json:"images,omitempty" yaml:"images,omitempty" mapstructure:"images,omitempty"` + + // Patches corresponds to the JSON schema field "patches". + Patches SpecDistributionCustomPatchesPatches `json:"patches,omitempty" yaml:"patches,omitempty" mapstructure:"patches,omitempty"` + + // PatchesStrategicMerge corresponds to the JSON schema field + // "patchesStrategicMerge". + PatchesStrategicMerge SpecDistributionCustomPatchesPatchesStrategicMerge `json:"patchesStrategicMerge,omitempty" yaml:"patchesStrategicMerge,omitempty" mapstructure:"patchesStrategicMerge,omitempty"` + + // SecretGenerator corresponds to the JSON schema field "secretGenerator". + SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` +} + +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` + + // Dr corresponds to the JSON schema field "dr". + Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` + + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". 
+ Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". + Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` +} + +// Configuration for the Auth module. +type SpecDistributionModulesAuth struct { + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` + + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // OidcKubernetesAuth corresponds to the JSON schema field "oidcKubernetesAuth". + OidcKubernetesAuth *SpecDistributionModulesAuthOIDCKubernetesAuth `json:"oidcKubernetesAuth,omitempty" yaml:"oidcKubernetesAuth,omitempty" mapstructure:"oidcKubernetesAuth,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +} + +// Configuration for the Dex package. 
+type SpecDistributionModulesAuthDex struct { + // Additional static clients defitions that will be added to the default clients + // included with the distribution in Dex's configuration. Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ + AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` + + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ + Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` + + // Expiry corresponds to the JSON schema field "expiry". + Expiry *SpecDistributionModulesAuthDexExpiry `json:"expiry,omitempty" yaml:"expiry,omitempty" mapstructure:"expiry,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). + IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` + + // Dex signing key expiration time duration (default 6h). + SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` +} + +type SpecDistributionModulesAuthOIDCKubernetesAuth struct { + // The client ID that the Kubernetes API will use to authenticate against the OIDC + // provider (Dex). 
+ ClientID *string `json:"clientID,omitempty" yaml:"clientID,omitempty" mapstructure:"clientID,omitempty"` + + // The client secret that the Kubernetes API will use to authenticate against the + // OIDC provider (Dex). + ClientSecret *string `json:"clientSecret,omitempty" yaml:"clientSecret,omitempty" mapstructure:"clientSecret,omitempty"` + + // DEPRECATED. Defaults to `email`. + EmailClaim *string `json:"emailClaim,omitempty" yaml:"emailClaim,omitempty" mapstructure:"emailClaim,omitempty"` + + // If true, components needed for interacting with the Kubernetes API with OIDC + // authentication (Gangplank, Dex) be deployed and configued. + Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` + + // The namespace to set in the context of the kubeconfig file generated by + // Gangplank. Defaults to `default`. + Namespace *string `json:"namespace,omitempty" yaml:"namespace,omitempty" mapstructure:"namespace,omitempty"` + + // Set to true to remove the CA from the kubeconfig file generated by Gangplank. + RemoveCAFromKubeconfig *bool `json:"removeCAFromKubeconfig,omitempty" yaml:"removeCAFromKubeconfig,omitempty" mapstructure:"removeCAFromKubeconfig,omitempty"` + + // Used to specify the scope of the requested Oauth authorization by Gangplank. + // Defaults to: `["openid", "profile", "email", "offline_access", "groups"]` + Scopes []string `json:"scopes,omitempty" yaml:"scopes,omitempty" mapstructure:"scopes,omitempty"` + + // The Key to use for the sessions in Gangplank. Must be different between + // different instances of Gangplank. + SessionSecurityKey *string `json:"sessionSecurityKey,omitempty" yaml:"sessionSecurityKey,omitempty" mapstructure:"sessionSecurityKey,omitempty"` + + // The JWT claim to use as the username. This is used in Gangplank's UI. This is + // combined with the clusterName for the user portion of the kubeconfig. Defaults + // to `nickname`. 
+ UsernameClaim *string `json:"usernameClaim,omitempty" yaml:"usernameClaim,omitempty" mapstructure:"usernameClaim,omitempty"` +} + +// Override the common configuration with a particular configuration for the Auth +// module. +type SpecDistributionModulesAuthOverrides struct { + // Override the definition of the Auth module ingresses. + Ingresses *SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the Auth module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the Auth + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAuthOverridesIngress struct { + // Use this host for the ingress instead of the default one. + Host string `json:"host" yaml:"host" mapstructure:"host"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` +} + +// Override the definition of the Auth module ingresses. +type SpecDistributionModulesAuthOverridesIngresses struct { + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthOverridesIngress `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // Gangplank corresponds to the JSON schema field "gangplank". 
+ Gangplank *SpecDistributionModulesAuthOverridesIngress `json:"gangplank,omitempty" yaml:"gangplank,omitempty" mapstructure:"gangplank,omitempty"` +} + +type SpecDistributionModulesAuthPomerium interface{} + +// override default routes for KFD components +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". + IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` + + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". 
+ LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` + + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". + MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". + MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` + + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". 
+ TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` +} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} + +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. +type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. + // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` + + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. 
When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` + + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. It's critical that secret keys are random, + // and stored safely. + // + // To generate a key, run the following command: `head -c32 /dev/urandom | base64` + SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` + + // Signing Key is the base64 representation of one or more PEM-encoded private + // keys used to sign a user's attestation JWT, which can be consumed by upstream + // applications to pass along identifying user information like username, id, and + // groups. + // + // To generates an P-256 (ES256) signing key: + // + // ```bash + // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem + // # careful! this will output your private key in terminal + // cat ec_private.pem | base64 + // ``` + SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` +} + +// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. +type SpecDistributionModulesAuthPomerium_2 struct { + // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". + DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // DEPRECATED: Use defaultRoutesPolicy and/or routes + Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` + + // Additional routes configuration for Pomerium. Follows Pomerium's route format: + // https://www.pomerium.com/docs/reference/routes + Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + + // Secrets corresponds to the JSON schema field "secrets". + Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +} + +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". + BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +// Configuration for the HTTP Basic Auth provider. +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for logging in with the HTTP basic authentication. + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The username for logging in with the HTTP basic authentication. 
+ Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecDistributionModulesAuthProviderType string + +const ( + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) + +// Configuration for the Disaster Recovery module. +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` + + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` +} + +type SpecDistributionModulesDrType string + +const ( + SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" + SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" +) + +// Configuration for the Velero package. +type SpecDistributionModulesDrVelero struct { + // The storage backend type for Velero. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesDrVeleroBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Velero's external storage backend. 
+ ExternalEndpoint *SpecDistributionModulesDrVeleroExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Configuration for Velero's backup schedules. + Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` + + // Configuration for the additional snapshotController component installation. + SnapshotController *SpecDistributionModulesDrVeleroSnapshotController `json:"snapshotController,omitempty" yaml:"snapshotController,omitempty" mapstructure:"snapshotController,omitempty"` +} + +type SpecDistributionModulesDrVeleroBackend string + +const ( + SpecDistributionModulesDrVeleroBackendExternalEndpoint SpecDistributionModulesDrVeleroBackend = "externalEndpoint" + SpecDistributionModulesDrVeleroBackendMinio SpecDistributionModulesDrVeleroBackend = "minio" +) + +// Configuration for Velero's external storage backend. +type SpecDistributionModulesDrVeleroExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Velero's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. 
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for Velero's backup schedules. +type SpecDistributionModulesDrVeleroSchedules struct { + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` + + // Whether to install or not the default `manifests` and `full` backups schedules. + // Default is `true`. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` + + // Configuration for Velero's manifests backup schedule. + Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsFull struct { + // The cron expression for the `full` backup schedule (default `0 1 * * *`). 
+ Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // EXPERIMENTAL (if you do more than one backups, the following backups after the + // first are not automatically restorable, see + // https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for + // the manual restore solution): SnapshotMoveData specifies whether snapshot data + // should be moved. Velero will create a new volume from the snapshot and upload + // the content to the storageLocation. + SnapshotMoveData *bool `json:"snapshotMoveData,omitempty" yaml:"snapshotMoveData,omitempty" mapstructure:"snapshotMoveData,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for Velero's manifests backup schedule. +type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { + // The cron expression for the `manifests` backup schedule (default `*/15 * * * + // *`). + Schedule *string `json:"schedule,omitempty" yaml:"schedule,omitempty" mapstructure:"schedule,omitempty"` + + // The Time To Live (TTL) of the backups created by the backup schedules (default + // `720h0m0s`, 30 days). Notice that changing this value will affect only newly + // created backups, prior backups will keep the old TTL. + Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` +} + +// Configuration for the additional snapshotController component installation. +type SpecDistributionModulesDrVeleroSnapshotController struct { + // Whether to install or not the snapshotController component in the cluster. + // Before enabling this field, check if your CSI driver does not have + // snapshotController built-in. 
+ Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` +} + +type SpecDistributionModulesIngress struct { + // The base domain used for all the KFD infrastructural ingresses. If using the + // nginx dual type, this value should be the same as the domain associated with + // the `internal` ingress class. + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` + + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. + CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` + + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // If corresponds to the JSON schema field "if". + If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"` + + // Configurations for the nginx ingress controller package. + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Then corresponds to the JSON schema field "then". + Then interface{} `json:"then,omitempty" yaml:"then,omitempty" mapstructure:"then,omitempty"` +} + +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. 
+type SpecDistributionModulesIngressCertManager struct { + // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". + ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. +type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email address to use during the certificate issuing process. + Email string `json:"email" yaml:"email" mapstructure:"email"` + + // Name of the clusterIssuer + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. + Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` + + // The type of the clusterIssuer. Only `http01` challenge is supported for + // on-premises clusters. See solvers for arbitrary configurations. + Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionModulesIngressCertManagerClusterIssuerType string + +const SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" + +type SpecDistributionModulesIngressForecastle struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` + + // The type of the nginx ingress controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. + // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. KFD infrastructural + // ingresses wil use the `internal` ingress class when using the dual type. + Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + + // Secret corresponds to the JSON schema field "secret". 
+ Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +} + +type SpecDistributionModulesIngressNginxTLSProvider string + +const ( + SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" + SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" +) + +// Kubernetes TLS secret for the ingresses TLS certificate. +type SpecDistributionModulesIngressNginxTLSSecret struct { + // The Certificate Authority certificate file's content. You can use the + // `"{file://}"` notation to get the content from a file. + Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` + + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. + Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` + + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. + Key string `json:"key" yaml:"key" mapstructure:"key"` +} + +type SpecDistributionModulesIngressNginxType string + +const ( + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" + SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" + SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" +) + +// Override the common configuration with a particular configuration for the +// Ingress module. +type SpecDistributionModulesIngressOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the Ingress module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the Ingress + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesIngressOverridesIngresses struct { + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +} + +// Configuration for the Logging module. +type SpecDistributionModulesLogging struct { + // Cerebro corresponds to the JSON schema field "cerebro". + Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` + + // CustomOutputs corresponds to the JSON schema field "customOutputs". + CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` + + // Loki corresponds to the JSON schema field "loki". + Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Opensearch corresponds to the JSON schema field "opensearch". 
+ Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"`
+
+ // Operator corresponds to the JSON schema field "operator".
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Selects the logging stack. Options are:
+ // - `none`: will disable the centralized logging.
+ // - `opensearch`: will deploy and configure the Logging Operator and an
+ // OpenSearch cluster (can be single or triple for HA) where the logs will be
+ // stored.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+ // - `customOutputs`: the Logging Operator will be deployed and installed but with
+ // no local storage, you will have to create the needed Outputs and ClusterOutputs
+ // to ship the logs to your desired storage.
+ Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
+}
+
+// DEPRECATED in latest versions of KFD.
+type SpecDistributionModulesLoggingCerebro struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
+type SpecDistributionModulesLoggingCustomOutputs struct {
+ // This value defines where the output from the `audit` Flow will be sent. This
+ // will be the `spec` section of the `Output` object. 
It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` + + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` + + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Events string `json:"events" yaml:"events" mapstructure:"events"` + + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` + + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` + + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` + // output to discard the flow: `nullout: {}` + Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` + + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` +} + +// Configuration for the Loki package. +type SpecDistributionModulesLoggingLoki struct { + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Loki's external storage backend. + ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Resources corresponds to the JSON schema field "resources". 
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +} + +type SpecDistributionModulesLoggingLokiBackend string + +const ( + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" + SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" +) + +// Configuration for Loki's external storage backend. +type SpecDistributionModulesLoggingLokiExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Loki's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. 
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for Logging's MinIO deployment. +type SpecDistributionModulesLoggingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +type SpecDistributionModulesLoggingOpensearch struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The storage size for the OpenSearch volumes. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. 
+ Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesLoggingOpensearchType string + +const ( + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" +) + +// Configuration for the Logging Operator. +type SpecDistributionModulesLoggingOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesLoggingType string + +const ( + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" +) + +// Configuration for the Monitoring module. +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". 
+ Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"`
+
+ // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics".
+ KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"`
+
+ // Mimir corresponds to the JSON schema field "mimir".
+ Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"`
+
+ // Minio corresponds to the JSON schema field "minio".
+ Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // Prometheus corresponds to the JSON schema field "prometheus".
+ Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"`
+
+ // PrometheusAgent corresponds to the JSON schema field "prometheusAgent".
+ PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"`
+
+ // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or
+ // `mimir`.
+ //
+ // - `none`: will disable the whole monitoring stack.
+ // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus
+ // instance, Alertmanager, a set of alert rules, exporters needed to monitor all
+ // the components of the cluster, Grafana and a series of dashboards to view the
+ // collected metrics, and more. 
+ // - `prometheusAgent`: will install Prometheus operator, an instance of Prometheus
+ // in Agent mode (no alerting, no queries, no storage), and all the exporters
+ // needed to get metrics for the status of the cluster and the workloads. Useful
+ // when having a centralized (remote) Prometheus where to ship the metrics and not
+ // storing them locally in the cluster.
+ // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir
+ // that allows for longer retention of metrics and the usage of Object Storage.
+ Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"`
+
+ // X509Exporter corresponds to the JSON schema field "x509Exporter".
+ X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringAlertManager struct {
+ // The webhook URL to send dead man's switch monitoring, for example to use with
+ // healthchecks.io
+ DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"`
+
+ // Set to false to avoid installing the Prometheus rules (alerts) included with
+ // the distribution.
+ InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"`
+
+ // The Slack webhook URL where to send the infrastructural and workload alerts to.
+ SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringBlackboxExporter struct {
+ // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+ // Setting this to true will deploy an additional `grafana-basic-auth` ingress
+ // protected with Grafana's basic auth instead of SSO. Its intended use is as a
+ // temporary ingress for when there are problems with the SSO login flow.
+ //
+ // Notice that by default anonymous access is enabled.
+ BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+ // role. Example:
+ //
+ // ```yaml
+ // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+ // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+ // 'Viewer'
+ // ```
+ //
+ // More details in [Grafana's
+ // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+ UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+// Configuration for the Mimir package.
+type SpecDistributionModulesMonitoringMimir struct {
+ // The storage backend type for Mimir. 
`minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Mimir's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the metrics stored in Mimir. Default is `30d`. Value must
+ // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365
+ // days.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMimirBackend string
+
+const (
+ SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint"
+ SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
+)
+
+// Configuration for Mimir's external storage backend.
+type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
+ // The access key ID (username) for the external S3-compatible bucket.
+ AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
+
+ // The bucket name of the external S3-compatible object storage.
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // External S3-compatible endpoint for Mimir's storage. 
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd 
in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +type TypesKubeResourcesLimits struct { + // The cpu limit for the loki pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the prometheus pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the loki pods + Cpu *string `json:"cpu,omitempty" 
yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the prometheus pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + } + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingLoki(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + } + *j = SpecDistributionModulesDrVeleroBackend(v) + return nil +} + +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} + +// Override the common configuration with a particular configuration for the +// module. 
+type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + } + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLogging(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthOIDCKubernetesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecDistributionModulesAuthOIDCKubernetesAuth: required") + } + type Plain SpecDistributionModulesAuthOIDCKubernetesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOIDCKubernetesAuth(plain) + return nil +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + } + *j = SpecDistributionModulesMonitoringMimirBackend(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil +} + +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. 
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the `k8s` Prometheus instance. 
+ RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the `k8s` Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} + +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil +} + +type TypesCidr string + +type SpecDistributionModulesNetworkingCilium struct { + // The mask size to use for the Pods network on each node. + MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Allows specifing a CIDR for the Pods network different from + // `.spec.kubernetes.podCidr`. If not set the default is to use + // `.spec.kubernetes.podCidr`. + PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"` +} + +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworkingType string + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "calico", + "cilium", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + } + *j = SpecDistributionModulesNetworkingType(v) + return nil +} + +const ( + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) + +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `calico` (default, via the Tigera + // Operator) or `cilium`. + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesNetworking: required") + } + type Plain SpecDistributionModulesNetworking + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesNetworking(plain) + return nil +} + +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil +} + +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Gatekeeper package. +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. 
+ AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil +} + +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil +} + +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +// Configuration for the Kyverno package. 
+type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil +} + +type SpecDistributionModulesPolicyType string + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) + +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + } + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) + return nil +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} + +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. 
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} + +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + } + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesTracing(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") + } + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") + } + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) + return nil +} + +type TypesKubeLabels map[string]string + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistribution(plain) + return nil +} + +type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { + // URL where to download the GPG key of the Apt repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // The GPG key ID of the Apt repository. Example: + // `36A1D7869245C8950F966E92D8576A8BA88D21E9` + GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` + + // An indicative name for the Apt repository. 
Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // A source string for the new Apt repository. Example: `deb + // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideApt) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["gpg_key"]; !ok || v == nil { + return fmt.Errorf("field gpg_key in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + if v, ok := raw["gpg_key_id"]; !ok || v == nil { + return fmt.Errorf("field gpg_key_id in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + if v, ok := raw["repo"]; !ok || v == nil { + return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideApt: required") + } + type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideApt + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAdvancedAirGapDependenciesOverrideApt(plain) + return nil +} + +type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { + // URL where to download the ASCII-armored GPG key of the Yum repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // If true, the GPG signature check on the packages will be enabled. + GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` + + // An indicative name for the Yum repository. 
Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // URL to the directory where the Yum repository's `repodata` directory lives. + // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` + + // If true, the GPG signature check on the `repodata` will be enabled. + RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesAdvancedAirGapDependenciesOverrideYum) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["gpg_key"]; !ok || v == nil { + return fmt.Errorf("field gpg_key in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + if v, ok := raw["gpg_key_check"]; !ok || v == nil { + return fmt.Errorf("field gpg_key_check in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + if v, ok := raw["repo"]; !ok || v == nil { + return fmt.Errorf("field repo in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + if v, ok := raw["repo_gpg_check"]; !ok || v == nil { + return fmt.Errorf("field repo_gpg_check in SpecKubernetesAdvancedAirGapDependenciesOverrideYum: required") + } + type Plain SpecKubernetesAdvancedAirGapDependenciesOverrideYum + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAdvancedAirGapDependenciesOverrideYum(plain) + return nil +} + +type SpecKubernetesAdvancedAirGapDependenciesOverride struct { + // Apt corresponds to the JSON schema field "apt". 
+ Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` + + // Yum corresponds to the JSON schema field "yum". + Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` +} + +// Advanced configuration for air-gapped installations. Allows setting custom URLs +// where to download the binaries dependencies from and custom .deb and .rpm +// package repositories. +type SpecKubernetesAdvancedAirGap struct { + // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should + // be as the one downloaded from containerd GitHub releases page. + ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` + + // DependenciesOverride corresponds to the JSON schema field + // "dependenciesOverride". + DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` + + // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded + // from + // `//etcd--linux-.tar.gz` + EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"` + + // Checksum for the runc binary. + RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"` + + // URL where to download the runc binary from. 
+ RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"` +} + +type SpecKubernetesAdvancedCloud struct { + // Sets cloud config for the Kubelet + Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + + // Sets the cloud provider for the Kubelet + Provider *string `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` +} + +// Allows specifying custom configuration for a registry at containerd level. You +// can set authentication details and mirrors for a registry. +// This feature can be used for example to authenticate to a private registry at +// containerd (container runtime) level, i.e. globally instead of using +// `imagePullSecrets`. It also can be used to use a mirror for a registry or to +// enable insecure connections to trusted registries that have self-signed +// certificates. +type SpecKubernetesAdvancedContainerdRegistryConfigs []struct { + // Set to `true` to skip TLS verification (e.g. when using self-signed + // certificates). + InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty" yaml:"insecureSkipVerify,omitempty" mapstructure:"insecureSkipVerify,omitempty"` + + // Array of URLs with the mirrors to use for the registry. Example: + // `["http://mymirror.tld:8080"]` + MirrorEndpoint []string `json:"mirrorEndpoint,omitempty" yaml:"mirrorEndpoint,omitempty" mapstructure:"mirrorEndpoint,omitempty"` + + // The password containerd will use to authenticate against the registry. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // Registry address on which you would like to configure authentication or + // mirror(s). Example: `myregistry.tld:5000` + Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // The username containerd will use to authenticate against the registry. 
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Advanced configuration for containerd +type SpecKubernetesAdvancedContainerd struct { + // RegistryConfigs corresponds to the JSON schema field "registryConfigs". + RegistryConfigs SpecKubernetesAdvancedContainerdRegistryConfigs `json:"registryConfigs,omitempty" yaml:"registryConfigs,omitempty" mapstructure:"registryConfigs,omitempty"` +} + +type SpecKubernetesAdvancedEncryption struct { + // etcd's encryption at rest configuration. Must be a string with the + // EncryptionConfiguration object in YAML. Example: + // + // ```yaml + // + // apiVersion: apiserver.config.k8s.io/v1 + // kind: EncryptionConfiguration + // resources: + // - resources: + // - secrets + // providers: + // - aescbc: + // keys: + // - name: mykey + // secret: base64_encoded_secret + // ``` + // + Configuration *string `json:"configuration,omitempty" yaml:"configuration,omitempty" mapstructure:"configuration,omitempty"` + + // The TLS cipher suites to use for etcd, kubelet, and kubeadm static pods. + // Example: + // ```yaml + // tlsCipherSuites: + // - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + // - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + // - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + // - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + // - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + // - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + // - "TLS_AES_128_GCM_SHA256" + // - "TLS_AES_256_GCM_SHA384" + // - "TLS_CHACHA20_POLY1305_SHA256" + // ``` + TlsCipherSuites []string `json:"tlsCipherSuites,omitempty" yaml:"tlsCipherSuites,omitempty" mapstructure:"tlsCipherSuites,omitempty"` +} + +// OIDC configuration for the Kubernetes API server. +type SpecKubernetesAdvancedOIDC struct { + // The path to the certificate for the CA that signed the identity provider's web + // certificate. Defaults to the host's root CAs. This should be a path available + // to the API Server. 
+ CaFile *string `json:"ca_file,omitempty" yaml:"ca_file,omitempty" mapstructure:"ca_file,omitempty"` + + // The client ID the API server will use to authenticate to the OIDC provider. + ClientId *string `json:"client_id,omitempty" yaml:"client_id,omitempty" mapstructure:"client_id,omitempty"` + + // Prefix prepended to group claims to prevent clashes with existing names (such + // as system: groups). + GroupPrefix *string `json:"group_prefix,omitempty" yaml:"group_prefix,omitempty" mapstructure:"group_prefix,omitempty"` + + // JWT claim to use as the user's group. + GroupsClaim *string `json:"groups_claim,omitempty" yaml:"groups_claim,omitempty" mapstructure:"groups_claim,omitempty"` + + // The issuer URL of the OIDC provider. + IssuerUrl *string `json:"issuer_url,omitempty" yaml:"issuer_url,omitempty" mapstructure:"issuer_url,omitempty"` + + // JWT claim to use as the user name. The default value is `sub`, which is + // expected to be a unique identifier of the end user. + UsernameClaim *string `json:"username_claim,omitempty" yaml:"username_claim,omitempty" mapstructure:"username_claim,omitempty"` + + // Prefix prepended to username claims to prevent clashes with existing names + // (such as system: users). + UsernamePrefix *string `json:"username_prefix,omitempty" yaml:"username_prefix,omitempty" mapstructure:"username_prefix,omitempty"` +} + +type SpecKubernetesAdvancedUsers struct { + // List of user names to create and get a kubeconfig file. Users will not have any + // permissions by default, RBAC setup for the new users is needed. + Names []string `json:"names,omitempty" yaml:"names,omitempty" mapstructure:"names,omitempty"` + + // The organization the users belong to. + Org *string `json:"org,omitempty" yaml:"org,omitempty" mapstructure:"org,omitempty"` +} + +type SpecKubernetesAdvanced struct { + // AirGap corresponds to the JSON schema field "airGap". 
+ AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` + + // Cloud corresponds to the JSON schema field "cloud". + Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` + + // Containerd corresponds to the JSON schema field "containerd". + Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` + + // Encryption corresponds to the JSON schema field "encryption". + Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"` + + // Oidc corresponds to the JSON schema field "oidc". + Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"` + + // URL of the registry where to pull images from for the Kubernetes phase. + // (Default is registry.sighup.io/fury/on-premises). + Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // Users corresponds to the JSON schema field "users". + Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` +} + +type SpecKubernetesAdvancedAnsible struct { + // Additional configuration to append to the ansible.cfg file + Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + + // The Python interpreter to use for running Ansible. Example: python3 + PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` +} + +type SpecKubernetesLoadBalancersHost struct { + // The IP address of the host. + Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` + + // A name to identify the host. 
This value will be concatenated to + // `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as + // `.`. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLoadBalancersHost) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ip"]; !ok || v == nil { + return fmt.Errorf("field ip in SpecKubernetesLoadBalancersHost: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesLoadBalancersHost: required") + } + type Plain SpecKubernetesLoadBalancersHost + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesLoadBalancersHost(plain) + return nil +} + +type SpecKubernetesLoadBalancersKeepalived struct { + // Set to install keepalived with a floating virtual IP shared between the load + // balancer hosts for a deployment in High Availability. + Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` + + // Name of the network interface where to bind the Keepalived virtual IP. + Interface *string `json:"interface,omitempty" yaml:"interface,omitempty" mapstructure:"interface,omitempty"` + + // The Virtual floating IP for Keepalived + Ip *string `json:"ip,omitempty" yaml:"ip,omitempty" mapstructure:"ip,omitempty"` + + // The passphrase for the Keepalived clustering. + Passphrase *string `json:"passphrase,omitempty" yaml:"passphrase,omitempty" mapstructure:"passphrase,omitempty"` + + // The virtual router ID of Keepalived, must be different from other Keepalived + // instances in the same network. + VirtualRouterId *string `json:"virtualRouterId,omitempty" yaml:"virtualRouterId,omitempty" mapstructure:"virtualRouterId,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesLoadBalancersKeepalived) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecKubernetesLoadBalancersKeepalived: required") + } + type Plain SpecKubernetesLoadBalancersKeepalived + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesLoadBalancersKeepalived(plain) + return nil +} + +// Configuration for HAProxy stats page. Accessible at http://:1936/stats +type SpecKubernetesLoadBalancersStats struct { + // The basic-auth password for HAProxy's stats page. + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The basic-auth username for HAProxy's stats page + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLoadBalancersStats) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecKubernetesLoadBalancersStats: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesLoadBalancersStats: required") + } + type Plain SpecKubernetesLoadBalancersStats + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesLoadBalancersStats(plain) + return nil +} + +type SpecKubernetesLoadBalancers struct { + // Additional configuration to append to HAProxy's configuration file. + AdditionalConfig *string `json:"additionalConfig,omitempty" yaml:"additionalConfig,omitempty" mapstructure:"additionalConfig,omitempty"` + + // Set to true to install HAProxy and configure it as a load balancer on the the + // load balancer hosts. 
+ Enabled bool `json:"enabled" yaml:"enabled" mapstructure:"enabled"` + + // Hosts corresponds to the JSON schema field "hosts". + Hosts []SpecKubernetesLoadBalancersHost `json:"hosts,omitempty" yaml:"hosts,omitempty" mapstructure:"hosts,omitempty"` + + // Keepalived corresponds to the JSON schema field "keepalived". + Keepalived *SpecKubernetesLoadBalancersKeepalived `json:"keepalived,omitempty" yaml:"keepalived,omitempty" mapstructure:"keepalived,omitempty"` + + // Stats corresponds to the JSON schema field "stats". + Stats *SpecKubernetesLoadBalancersStats `json:"stats,omitempty" yaml:"stats,omitempty" mapstructure:"stats,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLoadBalancers) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enabled"]; !ok || v == nil { + return fmt.Errorf("field enabled in SpecKubernetesLoadBalancers: required") + } + type Plain SpecKubernetesLoadBalancers + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesLoadBalancers(plain) + return nil +} + +type SpecKubernetesMastersHost struct { + // The IP address of the host + Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` + + // A name to identify the host. This value will be concatenated to + // `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as + // `.`. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesMastersHost) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ip"]; !ok || v == nil { + return fmt.Errorf("field ip in SpecKubernetesMastersHost: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesMastersHost: required") + } + type Plain SpecKubernetesMastersHost + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesMastersHost(plain) + return nil +} + +// Configuration for the control plane hosts +type SpecKubernetesMasters struct { + // Hosts corresponds to the JSON schema field "hosts". + Hosts []SpecKubernetesMastersHost `json:"hosts" yaml:"hosts" mapstructure:"hosts"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesMasters) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["hosts"]; !ok || v == nil { + return fmt.Errorf("field hosts in SpecKubernetesMasters: required") + } + type Plain SpecKubernetesMasters + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesMasters(plain) + return nil +} + +type SpecKubernetesNodesNodeHost struct { + // The IP address of the host + Ip string `json:"ip" yaml:"ip" mapstructure:"ip"` + + // A name to identify the host. This value will be concatenated to + // `.spec.kubernetes.dnsZone` to calculate the FQDN for the host as + // `.`. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodesNodeHost) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ip"]; !ok || v == nil { + return fmt.Errorf("field ip in SpecKubernetesNodesNodeHost: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodesNodeHost: required") + } + type Plain SpecKubernetesNodesNodeHost + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodesNodeHost(plain) + return nil +} + +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTaintsEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} + +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) + +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeTaints: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeTaints: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeTaints: required") + } + type Plain TypesKubeTaints + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeTaints(plain) + return nil +} + +type SpecKubernetesNodesNode struct { + // Hosts corresponds to the JSON schema field "hosts". + Hosts []SpecKubernetesNodesNodeHost `json:"hosts" yaml:"hosts" mapstructure:"hosts"` + + // Name for the node group. It will be also used as the node role label. It should + // follow the [valid variable names + // guideline](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#valid-variable-names) + // from Ansible. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Taints corresponds to the JSON schema field "taints". + Taints []TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodesNode) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["hosts"]; !ok || v == nil { + return fmt.Errorf("field hosts in SpecKubernetesNodesNode: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodesNode: required") + } + type Plain SpecKubernetesNodesNode + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.Hosts != nil && len(plain.Hosts) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "hosts", 1) + } + *j = SpecKubernetesNodesNode(plain) + return nil +} + +// Configuration for the node hosts +type SpecKubernetesNodes []SpecKubernetesNodesNode + +type TypesUri string + +type SpecKubernetesProxy struct { + // The HTTP proxy URL. Example: http://test.example.dev:3128 + Http *TypesUri `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` + + // The HTTPS proxy URL. Example: https://test.example.dev:3128 + Https *TypesUri `json:"https,omitempty" yaml:"https,omitempty" mapstructure:"https,omitempty"` + + // Comma-separated list of hosts that should not use the HTTP(S) proxy. Example: + // localhost,127.0.0.1,172.16.0.0/17,172.16.128.0/17,10.0.0.0/8,.example.dev + NoProxy *string `json:"noProxy,omitempty" yaml:"noProxy,omitempty" mapstructure:"noProxy,omitempty"` +} + +// SSH credentials to access the hosts +type SpecKubernetesSSH struct { + // The path to the private key to use to connect to the hosts + KeyPath string `json:"keyPath" yaml:"keyPath" mapstructure:"keyPath"` + + // The username to use to connect to the hosts + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesSSH) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["keyPath"]; !ok || v == nil { + return fmt.Errorf("field keyPath in SpecKubernetesSSH: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesSSH: required") + } + type Plain SpecKubernetesSSH + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesSSH(plain) + return nil +} + +// Defines the Kubernetes components configuration and the values needed for the +// kubernetes phase of furyctl. +type SpecKubernetes struct { + // Advanced corresponds to the JSON schema field "advanced". + Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` + + // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". + AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` + + // The address for the Kubernetes control plane. Usually a DNS entry pointing to a + // Load Balancer on port 6443. + ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` + + // The DNS zone of the machines. It will be appended to the name of each host to + // generate the `kubernetes_hostname` in the Ansible inventory file. It is also + // used to calculate etcd's initial cluster value. + DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` + + // LoadBalancers corresponds to the JSON schema field "loadBalancers". + LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` + + // Masters corresponds to the JSON schema field "masters". 
+ Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` + + // Nodes corresponds to the JSON schema field "nodes". + Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` + + // The path to the folder where the PKI files for Kubernetes and etcd are stored. + PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` + + // The subnet CIDR to use for the Pods network. + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` + + // Proxy corresponds to the JSON schema field "proxy". + Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". + Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The subnet CIDR to use for the Services network. + SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["controlPlaneAddress"]; !ok || v == nil { + return fmt.Errorf("field controlPlaneAddress in SpecKubernetes: required") + } + if v, ok := raw["dnsZone"]; !ok || v == nil { + return fmt.Errorf("field dnsZone in SpecKubernetes: required") + } + if v, ok := raw["loadBalancers"]; !ok || v == nil { + return fmt.Errorf("field loadBalancers in SpecKubernetes: required") + } + if v, ok := raw["masters"]; !ok || v == nil { + return fmt.Errorf("field masters in SpecKubernetes: required") + } + if v, ok := raw["nodes"]; !ok || v == nil { + return fmt.Errorf("field nodes in SpecKubernetes: required") + } + if v, ok := raw["pkiFolder"]; !ok || v == nil { + return fmt.Errorf("field pkiFolder in SpecKubernetes: required") + } + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecKubernetes: required") + 
} + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecKubernetes: required") + } + if v, ok := raw["svcCidr"]; !ok || v == nil { + return fmt.Errorf("field svcCidr in SpecKubernetes: required") + } + type Plain SpecKubernetes + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetes(plain) + return nil +} + +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + } + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecPluginsHelmReleasesElemSetElem(plain) + return nil +} + +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. 
+ DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". + Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +} + +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` +} + +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +} + +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". 
+ Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Spec) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") + } + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + type Plain Spec + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) + return nil +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil +} + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} + +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + return nil +} + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + } + *j = TypesKubeTolerationEffect_1(v) + return nil +} + +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + } + *j = TypesKubeTolerationOperator_1(v) + return nil +} + +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration_1(plain) + return nil +} + +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + } + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthPomerium_2(plain) + return nil +} + +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesKubeLabels_1 map[string]string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} + +var enumValues_OnpremisesKfdV1Alpha2Kind = []interface{}{ + "OnPremises", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OnpremisesKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_OnpremisesKfdV1Alpha2Kind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OnpremisesKfdV1Alpha2Kind, v) + } + *j = OnpremisesKfdV1Alpha2Kind(v) + return nil +} + +type TypesKubeNodeSelector map[string]string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *Metadata) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") + } + type Plain Metadata + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *OnpremisesKfdV1Alpha2) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["apiVersion"]; !ok || v == nil { + return fmt.Errorf("field apiVersion in OnpremisesKfdV1Alpha2: required") + } + if v, ok := raw["kind"]; !ok || v == nil { + return fmt.Errorf("field kind in OnpremisesKfdV1Alpha2: required") + } + if v, ok := raw["metadata"]; !ok || v == nil { + return fmt.Errorf("field metadata in OnpremisesKfdV1Alpha2: required") + } + if v, ok := raw["spec"]; !ok || v == nil { + return fmt.Errorf("field spec in OnpremisesKfdV1Alpha2: required") + } + type Plain OnpremisesKfdV1Alpha2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = OnpremisesKfdV1Alpha2(plain) + return nil +} diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index e69de29bb..3323a7b0f 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -0,0 +1,2925 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "pattern": 
"^kfd\\.sighup\\.io/v\\d+((alpha|beta)\\d+)?$" + }, + "kind": { + "type": "string", + "enum": [ + "EKSCluster" + ] + }, + "metadata": { + "$ref": "#/$defs/Metadata" + }, + "spec": { + "$ref": "#/$defs/Spec" + } + }, + "additionalProperties": false, + "required": [ + "apiVersion", + "kind", + "metadata", + "spec" + ], + "$defs": { + "Metadata": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "minLength": 1, + "maxLength": 56 + } + }, + "required": [ + "name" + ] + }, + "Spec": { + "type": "object", + "additionalProperties": false, + "properties": { + "distributionVersion": { + "type": "string", + "minLength": 1 + }, + "region": { + "$ref": "#/$defs/Types.AwsRegion" + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." + }, + "toolsConfiguration": { + "$ref": "#/$defs/Spec.ToolsConfiguration" + }, + "infrastructure": { + "$ref": "#/$defs/Spec.Infrastructure" + }, + "kubernetes": { + "$ref": "#/$defs/Spec.Kubernetes" + }, + "distribution": { + "$ref": "#/$defs/Spec.Distribution" + }, + "plugins": { + "$ref": "../public/spec-plugins.json" + } + }, + "required": [ + "distributionVersion", + "region", + "kubernetes", + "distribution", + "toolsConfiguration" + ], + "if": { + "anyOf": [ + { + "properties": { + "infrastructure": { + "type": "null" + } + } + }, + { + "properties": { + "infrastructure": { + "properties": { + "vpc": { + "type": "null" + } + } + } + } + } + ] + }, + "then": { + "properties": { + "kubernetes": { + "required": [ + "vpcId", + "subnetIds" + ] + } + } + }, + "else": { + "properties": { + "kubernetes": { + "type": "object", + "properties": { + "vpcId": { + "type": "null" + }, + "subnetIds": { + "type": "null" + } + } + } + } + } + }, + "Spec.ToolsConfiguration": { + "type": "object", + "additionalProperties": false, + "properties": { + "terraform": { + "$ref": 
"#/$defs/Spec.ToolsConfiguration.Terraform" + } + }, + "required": [ + "terraform" + ] + }, + "Spec.ToolsConfiguration.Terraform": { + "type": "object", + "additionalProperties": false, + "properties": { + "state": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State" + } + }, + "required": [ + "state" + ] + }, + "Spec.ToolsConfiguration.Terraform.State": { + "type": "object", + "additionalProperties": false, + "properties": { + "s3": { + "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" + } + }, + "required": [ + "s3" + ] + }, + "Spec.ToolsConfiguration.Terraform.State.S3": { + "type": "object", + "additionalProperties": false, + "properties": { + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "description": "This value defines which bucket will be used to store all the states" + }, + "keyPrefix": { + "$ref": "#/$defs/Types.AwsS3KeyPrefix", + "description": "This value defines which folder will be used to store all the states inside the bucket" + }, + "region": { + "$ref": "#/$defs/Types.AwsRegion", + "description": "This value defines in which region the bucket is located" + }, + "skipRegionValidation": { + "type": "boolean", + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + } + }, + "required": [ + "bucketName", + "keyPrefix", + "region" + ] + }, + "Spec.Infrastructure": { + "type": "object", + "additionalProperties": false, + "properties": { + "vpc": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc", + "description": "This key defines the VPC that will be created in AWS" + }, + "vpn": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn", + "description": "This section defines the creation of VPN bastions" + } + }, + "allOf": [ + { + "if": { + "allOf": [ + { + "properties": { + "vpc": { + "type": "null" + } + } + }, + { + "not": { + "properties": { + "vpn": { + "type": "null" + } + } + } + } + ] + }, + "then": { + "properties": { 
+ "vpn": { + "required": [ + "vpcId" + ] + } + } + } + }, + { + "if": { + "allOf": [ + { + "not": { + "properties": { + "vpc": { + "type": "null" + } + } + } + }, + { + "not": { + "properties": { + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } + } + } + } + } + } + ] + }, + "then": { + "properties": { + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } + } + } + } + } + } + ] + }, + "Spec.Infrastructure.Vpc": { + "type": "object", + "additionalProperties": false, + "properties": { + "network": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network" + } + }, + "required": [ + "network" + ] + }, + "Spec.Infrastructure.Vpc.Network": { + "type": "object", + "additionalProperties": false, + "properties": { + "cidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "This is the CIDR of the VPC that will be created" + }, + "subnetsCidrs": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" + } + }, + "required": [ + "cidr", + "subnetsCidrs" + ] + }, + "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { + "type": "object", + "additionalProperties": false, + "properties": { + "private": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + }, + "public": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + } + }, + "required": [ + "private", + "public" + ] + }, + "Spec.Infrastructure.Vpn": { + "type": "object", + "additionalProperties": false, + "properties": { + "instances": { + "type": "integer", + "description": "The number of instances to create, 0 to skip the creation" + }, + "port": { + "$ref": "#/$defs/Types.TcpPort", + "description": "The port used by the OpenVPN server" + }, + "instanceType": { + "type": "string", + 
"description": "The size of the AWS EC2 instance" + }, + "diskSize": { + "type": "integer", + "description": "The size of the disk in GB" + }, + "operatorName": { + "type": "string", + "description": "The username of the account to create in the bastion's operating system" + }, + "dhParamsBits": { + "type": "integer", + "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + }, + "vpnClientsSubnetCidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + }, + "ssh": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" + }, + "vpcId": { + "$ref": "#/$defs/Types.AwsVpcId", + "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + }, + "bucketNamePrefix": { + "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", + "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + }, + "iamUserNameOverride": { + "$ref": "#/$defs/Types.AwsIamRoleName", + "description": "Overrides the default IAM user name for the VPN" + } + }, + "required": [ + "ssh", + "vpnClientsSubnetCidr" + ] + }, + "Spec.Infrastructure.Vpn.Ssh": { + "type": "object", + "additionalProperties": false, + "properties": { + "publicKeys": { + "type": "array", + "items": { + "anyOf": [ + { + "$ref": "#/$defs/Types.SshPubKey" + }, + { + "$ref": "#/$defs/Types.FileRef" + } + ] + }, + "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + }, + "githubUsersName": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + }, + "allowedFromCidrs": { + "type": "array", + "items": { + 
"$ref": "#/$defs/Types.Cidr" + }, + "description": "The CIDR enabled in the security group that can access the bastions in SSH" + } + }, + "required": [ + "allowedFromCidrs", + "githubUsersName" + ] + }, + "Spec.Kubernetes": { + "type": "object", + "additionalProperties": false, + "properties": { + "vpcId": { + "$ref": "#/$defs/Types.AwsVpcId", + "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + }, + "clusterIAMRoleNamePrefixOverride": { + "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", + "description": "Overrides the default IAM role name prefix for the EKS cluster" + }, + "workersIAMRoleNamePrefixOverride": { + "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", + "description": "Overrides the default IAM role name prefix for the EKS workers" + }, + "subnetIds": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsSubnetId" + }, + "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + }, + "apiServer": { + "$ref": "#/$defs/Spec.Kubernetes.APIServer" + }, + "serviceIpV4Cidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + }, + "nodeAllowedSshPublicKey": { + "anyOf": [ + { + "$ref": "#/$defs/Types.AwsSshPubKey" + }, + { + "$ref": "#/$defs/Types.FileRef" + } + ], + "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + }, + "nodePoolsLaunchKind": { + "type": "string", + "enum": [ + "launch_configurations", + "launch_templates", + "both" + ], + "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." 
+ }, + "logRetentionDays": { + "type": "integer", + "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + }, + "logsTypes": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler" + ] + }, + "minItems": 0, + "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types." + }, + "nodePools": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool" + } + }, + "awsAuth": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth" + } + }, + "required": [ + "apiServer", + "nodeAllowedSshPublicKey", + "nodePools", + "nodePoolsLaunchKind" + ] + }, + "Spec.Kubernetes.APIServer": { + "type": "object", + "additionalProperties": false, + "properties": { + "privateAccess": { + "type": "boolean", + "description": "This value defines if the API server will be accessible only from the private subnets" + }, + "privateAccessCidrs": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 0, + "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + }, + "publicAccessCidrs": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 0, + "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + }, + "publicAccess": { + "type": "boolean", + "description": "This value defines if the API server will be accessible from the public subnets" + } + }, + "required": [ + "privateAccess", + "publicAccess" + ] + }, + "Spec.Kubernetes.NodePool": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "eks-managed", + "self-managed" + ] + }, + "name": { + "type": "string", + "description": "The name of the node pool" + }, + "ami": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" + }, + 
"containerRuntime": { + "type": "string", + "enum": [ + "docker", + "containerd" + ], + "description": "The container runtime to use for the nodes" + }, + "size": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" + }, + "instance": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance" + }, + "attachedTargetGroups": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsArn" + }, + "description": "This optional array defines additional target groups to attach to the instances in the node pool" + }, + "labels": { + "$ref": "#/$defs/Types.KubeLabels", + "description": "Kubernetes labels that will be added to the nodes" + }, + "taints": { + "$ref": "#/$defs/Types.KubeTaints", + "description": "Kubernetes taints that will be added to the nodes" + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "AWS tags that will be added to the ASG and EC2 instances" + }, + "subnetIds": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsSubnetId" + }, + "description": "This value defines the subnet IDs where the nodes will be created" + }, + "additionalFirewallRules": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" + } + }, + "required": [ + "instance", + "name", + "size" + ] + }, + "Spec.Kubernetes.NodePool.Ami": { + "type": "object", + "additionalProperties": false, + "properties": { + "id": { + "type": "string", + "description": "The AMI ID to use for the nodes" + }, + "owner": { + "type": "string", + "description": "The owner of the AMI" + } + }, + "required": [ + "id", + "owner" + ] + }, + "Spec.Kubernetes.NodePool.Instance": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "description": "The instance type to use for the nodes" + }, + "spot": { + "type": "boolean", + "description": "If true, the nodes will be created as spot instances" + }, + "volumeSize": { + "type": "integer", + "description": "The size of the disk in GB" + }, + "volumeType": { + "type": 
"string", + "enum": [ + "gp2", + "gp3", + "io1", + "standard" + ] + }, + "maxPods": { + "type": "integer" + } + }, + "required": [ + "type" + ] + }, + "Spec.Kubernetes.NodePool.Size": { + "type": "object", + "additionalProperties": false, + "properties": { + "min": { + "type": "integer", + "minimum": 0, + "description": "The minimum number of nodes in the node pool" + }, + "max": { + "type": "integer", + "minimum": 0, + "description": "The maximum number of nodes in the node pool" + } + }, + "required": [ + "max", + "min" + ] + }, + "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { + "type": "object", + "additionalProperties": false, + "properties": { + "cidrBlocks": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" + }, + "minItems": 1, + "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + }, + "sourceSecurityGroupId": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId" + }, + "minItems": 1 + }, + "self": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self" + }, + "minItems": 1 + } + } + }, + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ] + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags" + }, + "cidrBlocks": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "minItems": 1 + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol" + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + } + }, + "required": [ + "cidrBlocks", + "name", + "ports", + "protocol", + "type" + ] + }, + 
"Spec.Kubernetes.NodePool.AdditionalFirewallRule.SourceSecurityGroupId": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the FW rule" + }, + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ], + "description": "The type of the FW rule can be ingress or egress" + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "The tags of the FW rule" + }, + "sourceSecurityGroupId": { + "type": "string", + "description": "The source security group ID" + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol", + "description": "The protocol of the FW rule" + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + } + }, + "required": [ + "sourceSecurityGroupId", + "name", + "ports", + "protocol", + "type" + ] + }, + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Self": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the FW rule" + }, + "type": { + "type": "string", + "enum": [ + "ingress", + "egress" + ], + "description": "The type of the FW rule can be ingress or egress" + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "The tags of the FW rule" + }, + "self": { + "type": "boolean", + "description": "If true, the source will be the security group itself" + }, + "protocol": { + "$ref": "#/$defs/Types.AwsIpProtocol", + "description": "The protocol of the FW rule" + }, + "ports": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" + } + }, + "required": [ + "self", + "name", + "ports", + "protocol", + "type" + ] + }, + "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { + "type": "object", + "additionalProperties": false, + "properties": { + "from": { + "$ref": "#/$defs/Types.TcpPort" + }, + "to": { + "$ref": "#/$defs/Types.TcpPort" + } + }, + "required": [ + "from", + "to" + ] + }, + 
"Spec.Kubernetes.AwsAuth": { + "type": "object", + "additionalProperties": false, + "properties": { + "additionalAccounts": { + "type": "array", + "items": { + "type": "string" + }, + "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + }, + "users": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" + }, + "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + }, + "roles": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" + }, + "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + } + } + }, + "Spec.Kubernetes.AwsAuth.Role": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string" + }, + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "rolearn": { + "$ref": "#/$defs/Types.AwsArn" + } + }, + "required": [ + "groups", + "rolearn", + "username" + ] + }, + "Spec.Kubernetes.AwsAuth.User": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string" + }, + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "userarn": { + "$ref": "#/$defs/Types.AwsArn" + } + }, + "required": [ + "groups", + "userarn", + "username" + ] + }, + "Spec.Distribution": { + "type": "object", + "additionalProperties": false, + "properties": { + "common": { + "$ref": "#/$defs/Spec.Distribution.Common" + }, + "modules": { + "$ref": "#/$defs/Spec.Distribution.Modules" + }, + "customPatches": { + "$ref": "../public/spec-distribution-custompatches.json" + } + }, + "required": [ + "modules" + ], + "if": { + "allOf": [ + { + "required": [ + "common" + ] + }, + { + "properties": { + "common": { + "required": [ + "provider" + ] + } + } + }, + { + "properties": { + "common": { + "properties": { + "provider": { + 
"required": [ + "type" + ] + } + } + } + } + }, + { + "properties": { + "common": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "eks" + } + } + } + } + } + } + } + ] + }, + "then": { + "properties": { + "modules": { + "required": [ + "aws" + ] + } + } + }, + "else": { + "properties": { + "modules": { + "properties": { + "aws": { + "type": "null" + } + } + } + } + } + }, + "Spec.Distribution.Common": { + "type": "object", + "additionalProperties": false, + "properties": { + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for all the KFD modules" + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for all the KFD modules" + }, + "provider": { + "$ref": "#/$defs/Spec.Distribution.Common.Provider" + }, + "relativeVendorPath": { + "type": "string", + "description": "The relative path to the vendor directory, does not need to be changed" + }, + "registry": { + "type": "string", + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." 
+ } + } + }, + "Spec.Distribution.Common.Provider": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "description": "The type of the provider, must be EKS if specified" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules": { + "type": "object", + "additionalProperties": false, + "properties": { + "auth": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth" + }, + "aws": { + "$ref": "#/$defs/Spec.Distribution.Modules.Aws" + }, + "dr": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr" + }, + "ingress": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress" + }, + "logging": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging" + }, + "monitoring": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring" + }, + "tracing": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing" + }, + "networking": { + "$ref": "#/$defs/Spec.Distribution.Modules.Networking" + }, + "policy": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy" + } + }, + "required": [ + "dr", + "ingress", + "logging", + "policy" + ] + }, + "Spec.Distribution.Modules.Ingress": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides" + }, + "baseDomain": { + "type": "string", + "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + }, + "nginx": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", + "description": "Configurations for the nginx ingress controller module" + }, + "certManager": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + }, + "dns": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" + }, + "forecastle": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" + }, + "externalDns": { + "$ref": 
"#/$defs/Spec.Distribution.Modules.Ingress.ExternalDNS" + } + }, + "required": [ + "certManager", + "externalDns", + "baseDomain", + "nginx" + ], + "allOf": [ + { + "if": { + "properties": { + "nginx": { + "properties": { + "type": { + "const": "dual" + } + } + } + } + }, + "then": { + "required": [ + "dns" + ], + "properties": { + "dns": { + "required": [ + "public", + "private" + ] + } + } + } + }, + { + "if": { + "properties": { + "nginx": { + "properties": { + "type": { + "const": "single" + } + } + } + } + }, + "then": { + "required": [ + "dns" + ], + "properties": { + "dns": { + "required": [ + "public" + ] + } + } + } + }, + { + "if": { + "properties": { + "nginx": { + "properties": { + "tls": { + "properties": { + "provider": { + "const": "certManager" + } + } + } + } + } + } + }, + "then": { + "required": [ + "certManager" + ] + } + } + ] + }, + "Spec.Distribution.Modules.Ingress.Overrides": { + "type": "object", + "additionalProperties": false, + "properties": { + "ingresses": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" + }, + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the ingress module" + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for the ingress module" + } + } + }, + "Spec.Distribution.Modules.Ingress.Overrides.Ingresses": { + "type": "object", + "additionalProperties": false, + "properties": { + "forecastle": { + "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + } + } + }, + "Spec.Distribution.Modules.Ingress.Forecastle": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Ingress.Nginx": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": 
"string", + "enum": [ + "none", + "single", + "dual" + ], + "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + }, + "tls": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules.Ingress.Nginx.TLS": { + "type": "object", + "additionalProperties": false, + "properties": { + "provider": { + "type": "string", + "enum": [ + "certManager", + "secret", + "none" + ], + "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + }, + "secret": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" + } + }, + "required": [ + "provider" + ], + "if": { + "properties": { + "provider": { + "const": "secret" + } + } + }, + "then": { + "required": [ + "secret" + ] + } + }, + "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { + "type": "object", + "additionalProperties": false, + "properties": { + "cert": { + "type": "string", + "description": "The certificate file content or you can use the file notation to get the content from a file" + }, + "key": { + "type": "string" + }, + "ca": { + "type": "string" + } + }, + "required": [ + "ca", + "cert", + "key" + ] + }, + "Spec.Distribution.Modules.Ingress.CertManager": { + "type": "object", + "additionalProperties": false, + "properties": { + "clusterIssuer": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "clusterIssuer" + ] + }, + "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the cluster issuer" + }, + "email": { + "type": "string", + "format": "email", + 
"description": "The email of the cluster issuer" + }, + "type": { + "type": "string", + "enum": [ + "dns01", + "http01" + ], + "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + }, + "solvers": { + "type": "array", + "description": "The custom solvers configurations" + }, + "route53": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" + } + }, + "required": [ + "route53", + "name", + "email" + ], + "oneOf": [ + { + "required": [ + "type" + ] + }, + { + "required": [ + "solvers" + ] + } + ] + }, + "Spec.Distribution.Modules.Ingress.DNS": { + "type": "object", + "additionalProperties": false, + "properties": { + "public": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public" + }, + "private": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Ingress.DNS.Public": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the public hosted zone" + }, + "create": { + "type": "boolean", + "description": "If true, the public hosted zone will be created" + } + }, + "required": [ + "name", + "create" + ] + }, + "Spec.Distribution.Modules.Ingress.DNS.Private": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the private hosted zone" + }, + "create": { + "type": "boolean", + "description": "If true, the private hosted zone will be created" + }, + "vpcId": { + "type": "string" + } + }, + "required": [ + "vpcId", + "name", + "create" + ] + }, + "Spec.Distribution.Modules.Logging": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "type": { + "type": "string", + "enum": [ + "none", + "opensearch", + "loki", + 
"customOutputs" + ], + "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + }, + "opensearch": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" + }, + "loki": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Loki" + }, + "cerebro": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Cerebro" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Minio" + }, + "operator": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Operator" + }, + "customOutputs": { + "$ref": "#/$defs/Spec.Distribution.Modules.Logging.CustomOutputs" + } + }, + "required": [ + "type" + ], + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "opensearch" + } + } + }, + "then": { + "required": [ + "opensearch" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "loki" + } + } + }, + "then": { + "required": [ + "loki" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "customOutputs" + } + } + }, + "then": { + "required": [ + "customOutputs" + ] + } + } + ] + }, + "Spec.Distribution.Modules.Logging.Opensearch": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "single", + "triple" + ], + "description": "The type of the opensearch, must be ***single*** or ***triple***" + }, + "resources": { + "$ref": "#/$defs/Types.KubeResources" + }, + "storageSize": { + "type": "string", + "description": "The storage size for the opensearch pods" + }, + "overrides": { + 
"$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules.Logging.Cerebro": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Logging.Minio": { + "type": "object", + "additionalProperties": false, + "properties": { + "storageSize": { + "type": "string", + "description": "The PVC size for each minio disk, 6 disks total" + }, + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username of the minio root user" + }, + "password": { + "type": "string", + "description": "The password of the minio root user" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Logging.Loki": { + "type": "object", + "additionalProperties": false, + "properties": { + "backend": { + "type": "string", + "enum": [ + "minio", + "externalEndpoint" + ] + }, + "externalEndpoint": { + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + "description": "The endpoint of the loki external endpoint" + }, + "insecure": { + "type": "boolean", + "description": "If true, the loki external endpoint will be insecure" + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key of the loki external endpoint" + }, + "accessKeyId": { + "type": "string", + "description": "The access key id of the loki external endpoint" + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the loki external endpoint" + } + } + }, + "tsdbStartDate": { + "type": "string", + "format": "date", + "description": "Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the time series database from BoltDB to TSDB and the schema from v11 to v13 that 
it uses to store the logs.\n\nThe value of this field will determine the date when Loki will start writing using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB and schema will be kept until they expire for reading purposes.\n\nValue must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024-11-18`." + }, + "resources": { + "$ref": "#/$defs/Types.KubeResources" + } + }, + "required": [ + "tsdbStartDate" + ] + }, + "Spec.Distribution.Modules.Logging.Operator": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Logging.CustomOutputs": { + "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "type": "object", + "additionalProperties": false, + "properties": { + "audit": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "events": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "infra": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ }, + "ingressNginx": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "kubernetes": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "systemdCommon": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "systemdEtcd": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + }, + "errors": { + "type": "string", + "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ } + }, + "required": [ + "audit", + "events", + "infra", + "ingressNginx", + "kubernetes", + "systemdCommon", + "systemdEtcd", + "errors" + ] + }, + "Spec.Distribution.Modules.Monitoring": { + "type": "object", + "additionalProperties": false, + "description": "configuration for the Monitoring module components", + "properties": { + "type": { + "type": "string", + "enum": [ + "none", + "prometheus", + "prometheusAgent", + "mimir" + ], + "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "prometheus": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Prometheus" + }, + "prometheusAgent": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.PrometheusAgent" + }, + "alertmanager": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.AlertManager" + }, + "grafana": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Grafana" + }, + "blackboxExporter": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.BlackboxExporter" + }, + "kubeStateMetrics": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.KubeStateMetrics" + }, + "x509Exporter": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.X509Exporter" + }, + "mimir": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Mimir" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Monitoring.Minio" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules.Monitoring.Prometheus": { + "type": "object", + "additionalProperties": false, + "properties": { + "resources": { + "$ref": "#/$defs/Types.KubeResources" + }, + "retentionTime": { + "type": "string", + "description": "The retention time for the k8s Prometheus instance." + }, + "retentionSize": { + "type": "string", + "description": "The retention size for the k8s Prometheus instance." + }, + "storageSize": { + "type": "string", + "description": "The storage size for the k8s Prometheus instance." + }, + "remoteWrite": { + "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "Spec.Distribution.Modules.Monitoring.PrometheusAgent": { + "type": "object", + "additionalProperties": false, + "properties": { + "resources": { + "$ref": "#/$defs/Types.KubeResources" + }, + "remoteWrite": { + "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "Spec.Distribution.Modules.Monitoring.AlertManager": { + "type": "object", + "additionalProperties": false, + "properties": { + "deadManSwitchWebhookUrl": { + "type": "string", + "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + }, + "installDefaultRules": { + "type": "boolean", + "description": "If true, the default rules will be installed" + }, + "slackWebhookUrl": { + "type": "string", + "description": "The slack webhook url to send alerts" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Grafana": { + "type": "object", + "additionalProperties": false, + "properties": { + "usersRoleAttributePath": { + "type": "string", + "description": "[JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's role. 
Example:\n\n```yaml\nusersRoleAttributePath: \"contains(groups[*], 'beta') && 'Admin' || contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && 'Viewer'\n```\n\nMore details in [Grafana's documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping)." + }, + "basicAuthIngress": { + "type": "boolean", + "description": "Setting this to true will deploy an additional `grafana-basic-auth` ingress protected with Grafana's basic auth instead of SSO. It's intended use is as a temporary ingress for when there are problems with the SSO login flow.\n\nNotice that by default anonymous access is enabled." + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.BlackboxExporter": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.KubeStateMetrics": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.X509Exporter": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Mimir": { + "type": "object", + "additionalProperties": false, + "properties": { + "retentionTime": { + "type": "string", + "description": "The retention time for the mimir pods" + }, + "backend": { + "type": "string", + "enum": [ + "minio", + "externalEndpoint" + ], + "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + }, + "externalEndpoint": { + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + 
"description": "The endpoint of the external mimir backend" + }, + "insecure": { + "type": "boolean", + "description": "If true, the external mimir backend will not use tls" + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key of the external mimir backend" + }, + "accessKeyId": { + "type": "string", + "description": "The access key id of the external mimir backend" + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the external mimir backend" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Monitoring.Minio": { + "type": "object", + "additionalProperties": false, + "properties": { + "storageSize": { + "type": "string", + "description": "The storage size for the minio pods" + }, + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the minio root user" + }, + "password": { + "type": "string", + "description": "The password for the minio root user" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Tracing": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "type": { + "type": "string", + "enum": [ + "none", + "tempo" + ], + "description": "The type of tracing to use, either ***none*** or ***tempo***" + }, + "tempo": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" + }, + "minio": { + "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Minio" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules.Tracing.Tempo": { + "type": "object", + "additionalProperties": false, + "properties": { + "retentionTime": { + "type": "string", + "description": "The retention time for the tempo pods" + }, + "backend": { + "type": "string", + "enum": [ 
+ "minio", + "externalEndpoint" + ], + "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + }, + "externalEndpoint": { + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + "description": "The endpoint of the external tempo backend" + }, + "insecure": { + "type": "boolean", + "description": "If true, the external tempo backend will not use tls" + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key of the external tempo backend" + }, + "accessKeyId": { + "type": "string", + "description": "The access key id of the external tempo backend" + }, + "bucketName": { + "type": "string", + "description": "The bucket name of the external tempo backend" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Tracing.Minio": { + "type": "object", + "additionalProperties": false, + "properties": { + "storageSize": { + "type": "string", + "description": "The storage size for the minio pods" + }, + "rootUser": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the minio root user" + }, + "password": { + "type": "string", + "description": "The password for the minio root user" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Networking": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + }, + "tigeraOperator": { + "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" + }, + "type": { + "type": "string", + "enum": [ + "none" + ] + } + } + }, + "Spec.Distribution.Modules.Networking.TigeraOperator": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": 
"#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Policy": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "type": { + "type": "string", + "enum": [ + "none", + "gatekeeper", + "kyverno" + ], + "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + }, + "gatekeeper": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" + }, + "kyverno": { + "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Kyverno" + } + }, + "required": [ + "type" + ], + "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "gatekeeper" + } + } + }, + "then": { + "required": [ + "gatekeeper" + ] + } + }, + { + "if": { + "properties": { + "type": { + "const": "kyverno" + } + } + }, + "then": { + "required": [ + "kyverno" + ] + } + } + ] + }, + "Spec.Distribution.Modules.Policy.Gatekeeper": { + "type": "object", + "additionalProperties": false, + "properties": { + "additionalExcludedNamespaces": { + "type": "array", + "items": { + "type": "string" + }, + "description": "This parameter adds namespaces to Gatekeeper's exemption list, so it will not enforce the constraints on them." 
+ }, + "enforcementAction": { + "type": "string", + "enum": [ + "deny", + "dryrun", + "warn" + ], + "description": "The enforcement action to use for the gatekeeper module" + }, + "installDefaultPolicies": { + "type": "boolean", + "description": "If true, the default policies will be installed" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "enforcementAction", + "installDefaultPolicies" + ] + }, + "Spec.Distribution.Modules.Policy.Kyverno": { + "type": "object", + "additionalProperties": false, + "properties": { + "additionalExcludedNamespaces": { + "type": "array", + "items": { + "type": "string" + }, + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + }, + "validationFailureAction": { + "type": "string", + "enum": [ + "Audit", + "Enforce" + ], + "description": "The validation failure action to use for the kyverno module" + }, + "installDefaultPolicies": { + "type": "boolean", + "description": "If true, the default policies will be installed" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "validationFailureAction", + "installDefaultPolicies" + ] + }, + "Spec.Distribution.Modules.Dr": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + }, + "type": { + "type": "string", + "enum": [ + "none", + "eks" + ], + "description": "The type of the DR, must be ***none*** or ***eks***" + }, + "velero": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" + } + }, + "required": [ + "type" + ], + "if": { + "properties": { + "type": { + "const": "eks" + } + } + }, + "then": { + "required": [ + "type", + "velero" + ] + } + }, + "Spec.Distribution.Modules.Dr.Velero": { + "type": "object", + "additionalProperties": false, + "properties": { + "schedules": { + "type": "object", + "additionalProperties": 
false, + "description": "Configuration for Velero's backup schedules.", + "properties": { + "install": { + "type": "boolean", + "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." + }, + "definitions": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero schedules.", + "properties": { + "manifests": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + } + } + }, + "full": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's full backup schedule.", + "properties": { + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation."
+ } + } + } + } + } + } + }, + "eks": { + "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero.Eks" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "eks" + ] + }, + "Spec.Distribution.Modules.Dr.Velero.Eks": { + "type": "object", + "additionalProperties": false, + "properties": { + "region": { + "$ref": "#/$defs/Types.AwsRegion", + "description": "The region where the velero bucket is located" + }, + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "maxLength": 49, + "description": "The name of the velero bucket" + }, + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + } + }, + "required": [ + "iamRoleArn", + "region", + "bucketName" + ] + }, + "Spec.Distribution.Modules.Auth": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" + }, + "provider": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider" + }, + "baseDomain": { + "type": "string", + "description": "The base domain for the auth module" + }, + "pomerium": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" + }, + "dex": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Dex" + } + }, + "required": [ + "provider" + ], + "allOf": [ + { + "if": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "sso" + } + } + } + } + }, + "then": { + "required": [ + "dex", + "pomerium", + "baseDomain" + ] + }, + "else": { + "properties": { + "dex": { + "type": "null" + }, + "pomerium": { + "type": "null" + } + } + } + }, + { + "if": { + "properties": { + "provider": { + "properties": { + "type": { + "const": "basicAuth" + } + } + } + } + }, + "then": { + "properties": { + "provider": { + "required": [ + "basicAuth" + ] + } + } + }, + "else": { + "properties": { + "provider": { + "basicAuth": { + "type": "null" + } + } + } + } + } + ] + }, + "Spec.Distribution.Modules.Auth.Overrides": { + "type": "object", + 
"additionalProperties": false, + "properties": { + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the auth module" + }, + "tolerations": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for the auth module" + }, + "ingresses": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" + } + } + } + }, + "Spec.Distribution.Modules.Auth.Overrides.Ingress": { + "type": "object", + "additionalProperties": false, + "properties": { + "host": { + "type": "string", + "description": "The host of the ingress" + }, + "ingressClass": { + "type": "string", + "description": "The ingress class of the ingress" + } + }, + "required": [ + "host", + "ingressClass" + ] + }, + "Spec.Distribution.Modules.Auth.Provider": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "none", + "basicAuth", + "sso" + ], + "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + }, + "basicAuth": { + "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" + } + }, + "required": [ + "type" + ] + }, + "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { + "type": "object", + "additionalProperties": false, + "properties": { + "username": { + "type": "string", + "description": "The username for the basic auth" + }, + "password": { + "type": "string", + "description": "The password for the basic auth" + } + }, + "required": [ + "username", + "password" + ] + }, + "Spec.Distribution.Modules.Auth.Pomerium": { + "$ref": "../public/spec-distribution-modules-auth-pomerium.json" + }, + "Spec.Distribution.Modules.Auth.Dex": { + "type": "object", + "additionalProperties": false, + "properties": { + "connectors": { + "type": "array", + "description": 
"The connectors for dex" + }, + "additionalStaticClients": { + "type": "array", + "description": "The additional static clients for dex" + }, + "expiry": { + "type": "object", + "additionalProperties": false, + "properties": { + "signingKeys": { + "type": "string", + "description": "Dex signing key expiration time duration (default 6h)." + }, + "idTokens": { + "type": "string", + "description": "Dex ID tokens expiration time duration (default 24h)." + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "connectors" + ] + }, + "Spec.Distribution.Modules.Aws": { + "type": "object", + "additionalProperties": false, + "properties": { + "clusterAutoscaler": { + "$ref": "#/$defs/Spec.Distribution.Modules.Aws.ClusterAutoscaler" + }, + "ebsCsiDriver": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + } + }, + "required": [ + "iamRoleArn" + ] + }, + "loadBalancerController": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + } + }, + "required": [ + "iamRoleArn" + ] + }, + "ebsSnapshotController": { + "type": "object", + "additionalProperties": false, + "properties": { + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleOverrides" + } + }, + "required": [ + "clusterAutoscaler", + "ebsCsiDriver", + "loadBalancerController", + "overrides" + ] + }, + "Types.SemVer": { + "type": "string", + "pattern": "^v?(?P<major>0|[1-9]\\d*)\\.(?P<minor>0|[1-9]\\d*)\\.(?P<patch>0|[1-9]\\d*)(?:-(?P<prerelease>(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + },
+ "Types.IpAddress": { + "type": "string", + "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$" + }, + "Types.Cidr": { + "type": "string", + "pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}\\/(3[0-2]|[1-2][0-9]|[0-9])$" + }, + "Types.FileRef": { + "type": "string", + "pattern": "^\\{file\\:\\/\\/.+\\}$" + }, + "Types.EnvRef": { + "type": "string", + "pattern": "^\\{env\\:\\/\\/.*\\}$" + }, + "Types.TcpPort": { + "type": "integer", + "minimum": 0, + "maximum": 65535 + }, + "Types.SshPubKey": { + "type": "string", + "pattern": "^ssh\\-(dsa|ecdsa|ecdsa-sk|ed25519|ed25519-sk|rsa)\\s+" + }, + "Types.Uri": { + "type": "string", + "pattern": "^(http|https)\\:\\/\\/.+$" + }, + "Types.AwsArn": { + "type": "string", + "pattern": "^arn:(?P<Partition>[^:\\n]*):(?P<Service>[^:\\n]*):(?P<Region>[^:\\n]*):(?P<AccountID>[^:\\n]*):(?P<Ignore>(?P<ResourceType>[^:\\/\\n]*)[:\\/])?(?P<Resource>.*)$" + }, + "Types.AwsRegion": { + "type": "string", + "enum": [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2" + ] + }, + "Types.AwsVpcId": { + "type": "string", + "pattern": "^vpc\\-([0-9a-f]{8}|[0-9a-f]{17})$" + }, + "Types.AwsSshPubKey": { + "type": "string", + "pattern": "^ssh\\-(ed25519|rsa)\\s+" + }, + "Types.AwsSubnetId": { + "type": "string", + "pattern": "^subnet\\-[0-9a-f]{17}$" + }, + "Types.AwsTags": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "Types.AwsIpProtocol": { + "type": "string", + "pattern": "^(?i)(tcp|udp|icmp|icmpv6|-1)$", + "$comment": "this value should be lowercase, but we rely on terraform to do the conversion to make it a bit more user
friendly" + }, + "Types.AwsIamRoleNamePrefix": { + "type": "string", + "pattern": "^[a-zA-Z0-9+=,.@_-]{1,38}$" + }, + "Types.AwsIamRoleName": { + "type": "string", + "pattern": "^[a-zA-Z0-9+=,.@_-]{1,63}$" + }, + "Types.AwsS3BucketName": { + "type": "string", + "allOf": [ + { + "pattern": "^[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]$" + }, + { + "not": { + "pattern": "^xn--|-s3alias$" + } + } + ] + }, + "Types.AwsS3BucketNamePrefix": { + "type": "string", + "allOf": [ + { + "pattern": "^[a-z0-9][a-z0-9-.]{1,35}[a-z0-9-.]$" + }, + { + "not": { + "pattern": "^xn--|-s3alias$" + } + } + ] + }, + "Types.AwsS3KeyPrefix": { + "type": "string", + "pattern": "^[A-z0-9][A-z0-9!-_.*'()]+$", + "maxLength": 960 + }, + "Types.KubeLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "Types.KubeTaints": { + "type": "array", + "items": { + "type": "string", + "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=([^-][\\w-]+):(NoSchedule|PreferNoSchedule|NoExecute)$" + } + }, + "Types.KubeNodeSelector": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "Types.KubeToleration": { + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "operator": { + "type": "string", + "enum": [ + "Exists", + "Equal" + ] + }, + "key": { + "type": "string", + "description": "The key of the toleration" + }, + "value": { + "type": "string", + "description": "The value of the toleration" + } + }, + "required": [ + "effect", + "key" + ], + "anyOf": [ + { + "required": [ + "operator" + ] + }, + { + "required": [ + "value" + ] + } + ] + }, + "Types.KubeResources": { + "type": "object", + "additionalProperties": false, + "properties": { + "requests": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string", + "description": "The cpu request for the prometheus pods" + }, + "memory": { 
+ "type": "string", + "description": "The memory request for the opensearch pods" + } + } + }, + "limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string", + "description": "The cpu limit for the opensearch pods" + }, + "memory": { + "type": "string", + "description": "The memory limit for the opensearch pods" + } + } + } + } + }, + "Types.FuryModuleOverrides": { + "type": "object", + "additionalProperties": false, + "properties": { + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the dr module" + }, + "tolerations": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for the monitoring module" + }, + "ingresses": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/Types.FuryModuleOverridesIngress" + } + } + } + }, + "Types.FuryModuleComponentOverrides": { + "type": "object", + "additionalProperties": false, + "properties": { + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the minio module" + }, + "tolerations": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for the cert-manager module" + } + } + }, + "Types.FuryModuleComponentOverridesWithIAMRoleName": { + "type": "object", + "additionalProperties": false, + "properties": { + "nodeSelector": { + "$ref": "#/$defs/Types.KubeNodeSelector", + "description": "The node selector to use to place the pods for the load balancer controller module" + }, + "tolerations": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/$defs/Types.KubeToleration" + }, + "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + }, + 
"iamRoleName": { + "$ref": "#/$defs/Types.AwsIamRoleName" + } + } + }, + "Types.FuryModuleOverridesIngress": { + "type": "object", + "additionalProperties": false, + "properties": { + "disableAuth": { + "type": "boolean", + "description": "If true, the ingress will not have authentication" + }, + "host": { + "type": "string", + "description": "The host of the ingress" + }, + "ingressClass": { + "type": "string", + "description": "The ingress class of the ingress" + } + } + }, + "Spec.Distribution.Modules.Aws.ClusterAutoscaler": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverridesWithIAMRoleName" + } + }, + "required": [ + "iamRoleArn" + ] + }, + "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "region": { + "$ref": "#/$defs/Types.AwsRegion" + }, + "hostedZoneId": { + "type": "string" + } + }, + "required": [ + "hostedZoneId", + "iamRoleArn", + "region" + ] + }, + "Spec.Distribution.Modules.Ingress.ExternalDNS": { + "type": "object", + "additionalProperties": false, + "properties": { + "privateIamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "publicIamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + } + }, + "required": [ + "privateIamRoleArn", + "publicIamRoleArn" + ] + } + } +} From 820144ee827018690dd3d571c965d3d18a476448 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Wed, 20 Nov 2024 13:04:44 +0100 Subject: [PATCH 093/160] fix(network-policies): remove unused policy --- .../monitoring/policies/grafana.yaml.tpl | 26 ------------------- 1 file changed, 26 deletions(-) diff --git a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl index cc7728597..95b548e7b 100644 --- 
a/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/grafana.yaml.tpl @@ -34,32 +34,6 @@ spec: --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy -metadata: - name: grafana-egress-tempo-gateway - namespace: monitoring - labels: - cluster.kfd.sighup.io/module: monitoring -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app.kubernetes.io/name: grafana - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: tracing - podSelector: - matchLabels: - app.kubernetes.io/name: tempo - app.kubernetes.io/component: gateway - ports: - - port: 8080 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy metadata: name: grafana-ingress-nginx namespace: monitoring From 85ca1ef3df60e55d107e981f40433204346bbff1 Mon Sep 17 00:00:00 2001 From: Simone Bruzzese Date: Wed, 20 Nov 2024 13:05:01 +0100 Subject: [PATCH 094/160] fix(docs): fix names in monitoring network policies --- .../network-policies/modules/monitoring/mimir.md | 16 ++++++++-------- .../modules/monitoring/prometheus.md | 5 ----- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/docs/network-policies/modules/monitoring/mimir.md b/docs/network-policies/modules/monitoring/mimir.md index c118a0e27..5619dbfe4 100644 --- a/docs/network-policies/modules/monitoring/mimir.md +++ b/docs/network-policies/modules/monitoring/mimir.md @@ -4,14 +4,14 @@ graph TD %% Namespace subgraph monitoring - gateway[Gateway
component: gateway] - distributor[Distributor
component: distributor] - ingester[Ingester
component: ingester] - querier[Querier
component: querier] - qfront[Query Frontend
component: query-frontend] - qsched[Query Scheduler
component: query-scheduler] - store[Store Gateway
component: store-gateway] - compactor[Compactor
component: compactor] + gateway[Mimir Gateway
app.kubernetes.io/component: gateway] + distributor[Mimir Distributor
app.kubernetes.io/component: distributor] + ingester[Mimir Ingester
app.kubernetes.io/component: ingester] + querier[Mimir Querier
app.kubernetes.io/component: querier] + qfront[Mimir Query Frontend
app.kubernetes.io/component: query-frontend] + qsched[Mimir Query Scheduler
app.kubernetes.io/component: query-scheduler] + store[Mimir Store Gateway
app.kubernetes.io/component: store-gateway] + compactor[Mimir Compactor
app.kubernetes.io/component: compactor] grafana[Grafana
app.kubernetes.io/name: grafana] prom[Prometheus
app.kubernetes.io/name: prometheus] am[Alertmanager
app.kubernetes.io/component: alert-router] diff --git a/docs/network-policies/modules/monitoring/prometheus.md b/docs/network-policies/modules/monitoring/prometheus.md index b9bb8da70..f05457035 100644 --- a/docs/network-policies/modules/monitoring/prometheus.md +++ b/docs/network-policies/modules/monitoring/prometheus.md @@ -17,10 +17,6 @@ graph TD x509[x509 Exporter
app: x509-certificate-exporter] end - subgraph tracing - tempo[Tempo Gateway
app.kubernetes.io/name: tempo
app.kubernetes.io/component: gateway] - end - %% External and K8s Core Components api[Kubernetes API] dns[Kube DNS] @@ -42,7 +38,6 @@ graph TD prom -->|"9093,8080/TCP"| am pom -->|"9093/TCP"| am prom -->|"3000/TCP"| grafana - grafana -->|"8080/TCP"| tempo pom -->|"3000/TCP"| grafana x509 -->|"6443/TCP"| api ``` From cb249f45914dff03e6ebc3b59aa4afcfe794798a Mon Sep 17 00:00:00 2001 From: Giuseppe Iannelli Date: Fri, 15 Nov 2024 13:56:00 +0100 Subject: [PATCH 095/160] feat(ekscluster): add capability to choose ami type os # Conflicts: # schemas/private/ekscluster-kfd-v1alpha2.json --- docs/schemas/ekscluster-kfd-v1alpha2.md | 15 +++++ kfd.yaml | 2 +- schemas/private/ekscluster-kfd-v1alpha2.json | 58 +++++++++++++++++-- schemas/public/ekscluster-kfd-v1alpha2.json | 58 +++++++++++++++++-- .../config/ekscluster-kfd-v1alpha2.yaml.tpl | 3 + .../ekscluster/terraform/main.auto.tfvars.tpl | 7 ++- .../ekscluster/terraform/main.tf.tpl | 1 + .../ekscluster/terraform/variables.tf | 24 ++++++-- 8 files changed, 148 insertions(+), 20 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 6abb4d5e4..5a237c0f7 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -5437,6 +5437,21 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u |`"launch_templates"` | |`"both"` | +## .spec.kubernetes.nodePoolGlobalAmiType + +### Description + +Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool. 
+ +### Constraints + +**enum**: the value of this property must be equal to one of the following values: + +| Value | +|:---------------| +| `"alinux2"` | +| `"alinux2023"` | + ## .spec.kubernetes.serviceIpV4Cidr ### Description diff --git a/kfd.yaml b/kfd.yaml index 1e26eb4ae..3749f00c1 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -16,7 +16,7 @@ modules: kubernetes: eks: version: 1.30 - installer: v3.1.2 + installer: v3.2.0-rc0 onpremises: version: 1.30.6 installer: v1.30.6-rc.2 diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 3323a7b0f..a425f728f 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -472,6 +472,14 @@ ], "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, + "nodePoolGlobalAmiType": { + "type": "string", + "enum": [ + "alinux2", + "alinux2023" + ], + "description": "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool." + }, "logRetentionDays": { "type": "integer", "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." 
@@ -607,8 +615,36 @@ "required": [ "instance", "name", - "size" - ] + "size", + "type" + ], + "if": { + "allOf": [ + { + "properties": { + "type": { + "enum": [ + "eks-managed" + ] + } + } + } + ] + }, + "then": { + "properties": { + "ami": { + "properties": { + "id": { + "type": "null" + }, + "owner": { + "type": "null" + } + } + } + } + } }, "Spec.Kubernetes.NodePool.Ami": { "type": "object", @@ -621,12 +657,22 @@ "owner": { "type": "string", "description": "The owner of the AMI" + }, + "type": { + "type": "string", + "description": "The AMI type based on OS", + "enum": [ + "alinux2", + "alinux2023" + ] } }, - "required": [ - "id", - "owner" - ] + "dependencies": { + "id": [ + "owner" + ] + }, + "required": [] }, "Spec.Kubernetes.NodePool.Instance": { "type": "object", diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 7693cbd7d..6429beba2 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -472,6 +472,14 @@ ], "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, + "nodePoolGlobalAmiType": { + "type": "string", + "enum": [ + "alinux2", + "alinux2023" + ], + "description": "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool." + }, "logRetentionDays": { "type": "integer", "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." 
@@ -607,8 +615,36 @@ "required": [ "instance", "name", - "size" - ] + "size", + "type" + ], + "if": { + "allOf": [ + { + "properties": { + "type": { + "enum": [ + "eks-managed" + ] + } + } + } + ] + }, + "then": { + "properties": { + "ami": { + "properties": { + "id": { + "type": "null" + }, + "owner": { + "type": "null" + } + } + } + } + } }, "Spec.Kubernetes.NodePool.Ami": { "type": "object", @@ -621,12 +657,22 @@ "owner": { "type": "string", "description": "The owner of the AMI" + }, + "type": { + "type": "string", + "description": "The AMI type based on OS", + "enum": [ + "alinux2", + "alinux2023" + ] } }, - "required": [ - "id", - "owner" - ] + "dependencies": { + "id": [ + "owner" + ] + }, + "required": [] }, "Spec.Kubernetes.NodePool.Instance": { "type": "object", diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index 157f9ff37..7349f9223 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -85,6 +85,8 @@ spec: nodeAllowedSshPublicKey: "ssh-ed25519 XYZ" # Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. nodePoolsLaunchKind: "launch_templates" + # Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool. Valid values are: `alinux2`, `alinux2023` + nodePoolGlobalAmiType: "alinux2" # Optional Kubernetes Cluster log retention in days. Defaults to 90 days. 
# logRetentionDays: 90 # This map defines the access to the Kubernetes API server @@ -97,6 +99,7 @@ spec: nodePools: # This is the name of the nodepool - name: infra + type: self-managed # This map defines the max and min number of nodes in the nodepool autoscaling group size: min: 1 diff --git a/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl b/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl index ce574ac1b..c03abb66b 100644 --- a/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl +++ b/templates/kubernetes/ekscluster/terraform/main.auto.tfvars.tpl @@ -32,6 +32,9 @@ cluster_service_ipv4_cidr = null cluster_service_ipv4_cidr = {{ .spec.kubernetes.serviceIpV4Cidr | quote }} {{- end }} node_pools_launch_kind = {{ .spec.kubernetes.nodePoolsLaunchKind | quote }} +{{- if hasKeyAny .spec.kubernetes "nodePoolGlobalAmiType" }} +node_pools_global_ami_type = {{ .spec.kubernetes.nodePoolGlobalAmiType | quote }} +{{- end }} {{- if hasKeyAny .spec.kubernetes "logRetentionDays" }} cluster_log_retention_days = {{ .spec.kubernetes.logRetentionDays }} @@ -97,7 +98,11 @@ workers_iam_role_name_prefix_override = {{ .spec.kubernetes.workersIAMRoleNamePr {{- end}} {{- if hasKeyAny $np "ami" }} - {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_id" $np.ami.id "ami_owners" (list $np.ami.owner)) }} + {{- if and (eq $np.type "self-managed") (hasKeyAny $np.ami "id") (not (hasKeyAny $np.ami "type")) }} + {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_id" $np.ami.id "ami_owners" (list $np.ami.owner)) }} + {{- else if and (hasKeyAny $np.ami "type") (not (hasKeyAny $np.ami "id")) }} + {{- $currNodePool = mergeOverwrite $currNodePool (dict "ami_type" $np.ami.type) }} + {{- end }} {{- end }} {{- if hasKeyAny $np.instance "spot" }} diff --git a/templates/kubernetes/ekscluster/terraform/main.tf.tpl b/templates/kubernetes/ekscluster/terraform/main.tf.tpl index 5932b4169..615ca5b50 100644 --- a/templates/kubernetes/ekscluster/terraform/main.tf.tpl +++ b/templates/kubernetes/ekscluster/terraform/main.tf.tpl @@
-62,6 +62,7 @@ module "fury" { ssh_public_key = var.ssh_public_key node_pools = var.node_pools node_pools_launch_kind = var.node_pools_launch_kind + node_pools_global_ami_type = var.node_pools_global_ami_type tags = var.tags cluster_iam_role_name = var.cluster_iam_role_name_prefix_override workers_role_name = var.workers_iam_role_name_prefix_override diff --git a/templates/kubernetes/ekscluster/terraform/variables.tf b/templates/kubernetes/ekscluster/terraform/variables.tf index 30dc3547c..993e88ef0 100644 --- a/templates/kubernetes/ekscluster/terraform/variables.tf +++ b/templates/kubernetes/ekscluster/terraform/variables.tf @@ -63,19 +63,21 @@ variable "ssh_public_key" { variable "node_pools" { description = "An object list defining node pools configurations" type = list(object({ - name = string type = optional(string, "self-managed") # "eks-managed" or "self-managed" + name = string ami_id = optional(string) - version = optional(string) # null to use cluster_version + ami_owners = optional(list(string), ["amazon"]) + ami_type = optional(string, null) + version = optional(string, null) # null to use cluster_version min_size = number max_size = number instance_type = string - container_runtime = optional(string) - spot_instance = optional(bool) - max_pods = optional(number) # null to use default upstream configuration + container_runtime = optional(string, "containerd") + spot_instance = optional(bool, false) + max_pods = optional(number, null) # null to use default upstream configuration volume_size = optional(number, 100) volume_type = optional(string, "gp2") - subnets = optional(list(string)) # null to use default upstream configuration + subnets = optional(list(string), null) # null to use default upstream configuration labels = optional(map(string)) taints = optional(list(string)) tags = optional(map(string)) @@ -219,3 +221,13 @@ variable "workers_iam_role_name_prefix_override" { type = string default = "" } + +variable "node_pools_global_ami_type" { + type = 
string + description = "Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool." + default = "alinux2" + validation { + condition = contains(["alinux2", "alinux2023"], var.node_pools_global_ami_type) + error_message = "The global AMI type must be either 'alinux2' or 'alinux2023'." + } +} \ No newline at end of file From 5c119b10c46a753b9a265bcb7003629b33cd7db6 Mon Sep 17 00:00:00 2001 From: Giuseppe Iannelli Date: Mon, 18 Nov 2024 15:52:07 +0100 Subject: [PATCH 096/160] fix(template,config,eks): fix infra nodes tags --- templates/config/ekscluster-kfd-v1alpha2.yaml.tpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index 7349f9223..664278077 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -127,8 +127,8 @@ spec: - node.kubernetes.io/role=infra:NoSchedule # AWS tags that will be added to the ASG and EC2 instances, the example shows the labels needed by cluster autoscaler tags: - k8s.io/cluster-autoscaler/node-template/label/nodepool: "worker" - k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/role: "worker" + k8s.io/cluster-autoscaler/node-template/label/nodepool: "infra" + k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/role: "infra" # Optional additional firewall rules that will be attached to the nodes #additionalFirewallRules: # # The name of the rule From 2d84b3372d1ae4b2db0db71d5581dfa881bf6b84 Mon Sep 17 00:00:00 2001 From: Giuseppe Iannelli Date: Mon, 18 Nov 2024 17:11:33 +0100 Subject: [PATCH 097/160] ci(test): fix schema tests --- .../ekscluster-kfd-v1alpha2/001-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/001-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/002-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/002-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/003-no.yaml | 1 + 
.../ekscluster-kfd-v1alpha2/003-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/004-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/004-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/005-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/005-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/006-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/006-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/007-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/007-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/008-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/008-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/009-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/009-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/010-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/010-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/011-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/011-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/012-no.yaml | 134 +++++++++++++++ .../ekscluster-kfd-v1alpha2/012-ok.yaml | 156 ++++++++++++++++++ .../ekscluster-kfd-v1alpha2/001-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/001-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/002-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/002-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/003-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/003-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/004-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/004-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/005-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/005-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/006-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/006-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/007-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/007-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/008-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/008-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/009-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/009-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/010-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/010-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/011-no.yaml | 1 + .../ekscluster-kfd-v1alpha2/011-ok.yaml | 1 + .../ekscluster-kfd-v1alpha2/012-no.yaml | 134 +++++++++++++++ 
.../ekscluster-kfd-v1alpha2/012-ok.yaml | 134 +++++++++++++++ 48 files changed, 602 insertions(+) create mode 100644 tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml create mode 100644 tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml create mode 100644 tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml create mode 100644 tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml index a77ee5ed8..d5986d7c1 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml @@ -56,6 +56,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml index 54c739aa1..4f18f6f91 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml index b283a5734..5888b36c0 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml @@ -23,6 +23,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml index ce0c069b7..5a3c32f26 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml +++ 
b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml @@ -33,6 +33,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml index 883048d06..e2e9d56a2 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml index 4e1677078..249c25a09 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml index 1dad8e9dd..420b4840d 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml index 0738fbbb2..580764001 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git 
a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml index d78345fae..ccb5cb6d6 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml index a121a534d..34d2dd0af 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml index d8ffae0ee..885d1e3a0 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml index cf50e7bab..b2365c265 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml index f59c5b000..efdb5c4dc 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml +++ 
b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml index b96b1925f..853a45c9b 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml index a92184088..93a4776c6 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml index fa26569fd..f4069e810 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -48,6 +48,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml index 4516f01a3..51484c2b1 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git 
a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml index fa26569fd..f4069e810 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml @@ -48,6 +48,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml index 484b9722c..621ac5be6 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml index 0844c8324..59cbf0a0c 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml @@ -48,6 +48,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml index 7caeeac88..aaab0aa55 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml @@ -41,6 +41,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml index 3224f62f8..17284c184 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml +++ 
b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml @@ -42,6 +42,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml new file mode 100644 index 000000000..65dcdeba9 --- /dev/null +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml @@ -0,0 +1,134 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Tests the following cases: + +# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled +# When I validate the config against the schema +# Then an error "$ref/properties/nodePools/items/$ref/then/properties/ami/properties/id/type: expected null, but got string" is returned + +--- +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpn: + ssh: + allowedFromCidrs: + - 0.0.0.0/0 + githubUsersName: + - jnardiello + publicKeys: + - ssh-ed25519 SomethingSomething engineering@sighup.io + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["10.0.0.3/16"] + publicAccessCidrs: [] + publicAccess: false + vpcId: vpc-0123456789abcdef0 + subnetIds: + - subnet-0123456789abcdef0 + - subnet-0123456789abcdef1 + - subnet-0123456789abcdef2 + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: eks-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + customPatches: + configMapGenerator: + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - 
/path/to/envs.env + patches: + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patchesStrategicMerge: + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system + secretGenerator: + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + common: + provider: + type: eks + modules: + aws: {} + dr: + type: eks + velero: + eks: + bucketName: example-velero + region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + secret: + ca: | + value + cert: | + value + key: | + value + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml new file mode 100644 index 000000000..7a4dc98c0 --- /dev/null +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml @@ -0,0 +1,156 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# Tests the following cases: + +# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is not filled +# When I validate the config against the schema +# Then no errors are returned + +--- +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpn: + vpcId: vpc-0123456789abcdef0 + ssh: + allowedFromCidrs: + - 0.0.0.0/0 + githubUsersName: + - jnardiello + publicKeys: + - ssh-ed25519 SomethingSomething engineering@sighup.io + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["10.0.0.3/16"] + publicAccessCidrs: [] + publicAccess: false + vpcId: vpc-0123456789abcdef0 + subnetIds: + - subnet-0123456789abcdef0 + - subnet-0123456789abcdef1 + - subnet-0123456789abcdef2 + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePools: + - ami: + # eks-managed node pools must not set ami.id/ami.owner + type: alinux2023 + type: eks-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + customPatches: + configMapGenerator: + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env + patches: + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patchesStrategicMerge: + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system + secretGenerator: + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + common: + provider: + type: eks + modules: + aws: + clusterAutoscaler: + iamRoleArn: arn:aws:iam::123456789012:role/cluster-autoscaler + ebsCsiDriver: + iamRoleArn: arn:aws:iam::123456789012:role/ebs-csi-driver + loadBalancerController: + iamRoleArn:
arn:aws:iam::123456789012:role/load-balancer-controller + overrides: {} + dr: + type: eks + velero: + eks: + bucketName: example-velero + region: eu-west-1 + iamRoleArn: arn:aws:iam::123456789012:role/velero + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + vpcId: vpc-12345678901234567 + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + secret: + ca: | + value + cert: | + value + key: | + value + certManager: + clusterIssuer: + name: letsencrypt-fury + email: email@test.it + type: http01 + route53: + region: eu-west-1 + hostedZoneId: Z1234567890 + iamRoleArn: arn:aws:iam::123456789012:role/cert-manager + externalDns: + privateIamRoleArn: arn:aws:iam::123456789012:role/external-dns-private + publicIamRoleArn: arn:aws:iam::123456789012:role/external-dns-public + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml index 4f152ad42..763067fbf 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml @@ -56,6 +56,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml index ef7e71b3a..fd25653f5 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml @@ -50,6 
+50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml index ab277b1db..23ba3cb2b 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml @@ -23,6 +23,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml index 44f8f4a7e..4ee0dbdb1 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml @@ -33,6 +33,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml index eca2f32cd..1fb2e5ae9 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml index 2b634eb7d..6d4cdc170 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml index 
46d733beb..356843236 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml index e3c0320bc..502d91aa9 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml index 703123b34..67e02a452 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml index 033761852..1e74a70aa 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml index 3c0c8e6dc..cec6acfe2 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml @@ -50,6 +50,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: 
t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml index 55f04cbab..327d7f79c 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml index f59c5b000..efdb5c4dc 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml index 79e7d86cb..3183a77cd 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml index 7ba29188c..80d99e13a 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml index 660e03ea2..d4dfd6ae2 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml +++ 
b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -48,6 +48,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml index bf088d77c..0985fe398 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml index b99c536b6..20cb35b93 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml @@ -48,6 +48,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml index a553e542a..cbae48b16 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml @@ -49,6 +49,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml index 8e2e4f7d0..b203f3327 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml @@ -48,6 +48,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml 
b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml index 7caeeac88..aaab0aa55 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml @@ -41,6 +41,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml index 2bbc729bd..75f0c9e0e 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml @@ -42,6 +42,7 @@ spec: - ami: id: ami-01234567890123456 owner: "123456789012" + type: self-managed instance: type: t3.large name: worker-eks diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml new file mode 100644 index 000000000..65dcdeba9 --- /dev/null +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml @@ -0,0 +1,134 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# Tests the following cases: + +# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is filled +# When I validate the config against the schema +# Then an error "$ref/properties/nodePools/items/$ref/then/properties/ami/properties/id/type: expected null, but got string" is returned + +--- +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpn: + ssh: + allowedFromCidrs: + - 0.0.0.0/0 + githubUsersName: + - jnardiello + publicKeys: + - ssh-ed25519 SomethingSomething engineering@sighup.io + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["10.0.0.3/16"] + publicAccessCidrs: [] + publicAccess: false + vpcId: vpc-0123456789abcdef0 + subnetIds: + - subnet-0123456789abcdef0 + - subnet-0123456789abcdef1 + - subnet-0123456789abcdef2 + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: eks-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + customPatches: + configMapGenerator: + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env + patches: + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patchesStrategicMerge: + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system + secretGenerator: + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + common: + provider: + type: eks + modules: + aws: {} + dr: + type: eks + velero: + eks: + bucketName: example-velero + region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + 
create: true + name: internal.furyctl-demo.sighup.io + public: + create: true + name: furyctl-demo.sighup.io + nginx: + type: single + tls: + provider: secret + secret: + ca: | + value + cert: | + value + key: | + value + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml new file mode 100644 index 000000000..828309885 --- /dev/null +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml @@ -0,0 +1,134 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +# Tests the following cases: + +# Given "spec.kubernetes.nodePools.0.type" is 'eks-managed' and "spec.kubernetes.nodePools.0.ami.id" is not filled +# When I validate the config against the schema +# Then no errors are returned + +--- +apiVersion: kfd.sighup.io/v1alpha2 +kind: EKSCluster +metadata: + name: furyctl-dev-aws-al +spec: + infrastructure: + vpn: + ssh: + allowedFromCidrs: + - 0.0.0.0/0 + githubUsersName: + - jnardiello + publicKeys: + - ssh-ed25519 SomethingSomething engineering@sighup.io + vpnClientsSubnetCidr: 192.168.200.0/24 + kubernetes: + apiServer: + privateAccess: true + privateAccessCidrs: ["10.0.0.3/16"] + publicAccessCidrs: [] + publicAccess: false + vpcId: vpc-0123456789abcdef0 + subnetIds: + - subnet-0123456789abcdef0 + - subnet-0123456789abcdef1 + - subnet-0123456789abcdef2 + nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both + nodePools: + - ami: + id: ami-01234567890123456 + owner: "123456789012" + type: eks-managed + instance: + type: t3.large + name: worker-eks + size: + max: 3 + min: 2 + distribution: + customPatches: + configMapGenerator: + - name: a-configmap + files: + - /path/to/config.example + - name: b-configmap + envs: + - /path/to/envs.env + patches: + - target: + group: "" + version: v1 + kind: Service + name: cluster-autoscaler + namespace: kube-system + path: /path/to/patch.yaml + patchesStrategicMerge: + - | + --- + apiVersion: v1 + kind: Service + metadata: + labels: + label1: value1 + name: cluster-autoscaler + namespace: kube-system + secretGenerator: + - name: a-secret + files: + - /path/to/config.example + - name: b-secret + envs: + - /path/to/envs.env + common: + provider: + type: eks + modules: + aws: {} + dr: + type: eks + velero: + eks: + bucketName: example-velero + region: eu-west-1 + ingress: + baseDomain: furyctl-demo.sighup.io + dns: + private: + create: true + name: internal.furyctl-demo.sighup.io + public: + create: true + name: furyctl-demo.sighup.io 
+ nginx: + type: single + tls: + provider: secret + secret: + ca: | + value + cert: | + value + key: | + value + logging: + type: opensearch + opensearch: + type: single + policy: + type: gatekeeper + gatekeeper: + additionalExcludedNamespaces: [] + installDefaultPolicies: true + enforcementAction: deny + distributionVersion: v1.24.1 + region: eu-west-1 + toolsConfiguration: + terraform: + state: + s3: + bucketName: furyctl-test-eks + keyPrefix: furyctl-test + region: eu-west-1 From e1009b0e3d4302b83839b00280a84013c8ffca61 Mon Sep 17 00:00:00 2001 From: Giuseppe Iannelli Date: Wed, 20 Nov 2024 14:36:15 +0100 Subject: [PATCH 098/160] feat(ekscluster): bump eks-installer to v3.2.0-rc2 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 3749f00c1..b32714ee1 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -16,7 +16,7 @@ modules: kubernetes: eks: version: 1.30 - installer: v3.2.0-rc0 + installer: v3.2.0-rc1 onpremises: version: 1.30.6 installer: v1.30.6-rc.2 From f40a64df4e1f6207047d9bfabd4b689d8d660293 Mon Sep 17 00:00:00 2001 From: Giuseppe Iannelli Date: Wed, 20 Nov 2024 15:02:45 +0100 Subject: [PATCH 099/160] fix(templates,config): set default loki.tsdbStartDate to KFD release date --- templates/config/ekscluster-kfd-v1alpha2.yaml.tpl | 3 +++ templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl | 3 +++ templates/config/onpremises-kfd-v1alpha2.yaml.tpl | 3 +++ 3 files changed, 9 insertions(+) diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index 664278077..f823ad075 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -230,6 +230,9 @@ spec: logging: # can be opensearch, loki, customOutput or none. 
With none, the logging module won't be installed type: loki + # configurations for the loki package + loki: + tsdbStartDate: "2024-11-20" # configurations for the minio-ha package minio: # the PVC size for each minio disk, 6 disks total diff --git a/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl b/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl index f6af9d6b1..e2e795330 100644 --- a/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl +++ b/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl @@ -76,6 +76,9 @@ spec: logging: # can be opensearch, loki, customOutput or none. With none, the logging module won't be installed type: loki + # configurations for the loki package + loki: + tsdbStartDate: "2024-11-20" # configurations for the minio-ha package minio: # the PVC size for each minio disk, 6 disks total diff --git a/templates/config/onpremises-kfd-v1alpha2.yaml.tpl b/templates/config/onpremises-kfd-v1alpha2.yaml.tpl index f56bbdbfa..7ea97c12a 100644 --- a/templates/config/onpremises-kfd-v1alpha2.yaml.tpl +++ b/templates/config/onpremises-kfd-v1alpha2.yaml.tpl @@ -153,6 +153,9 @@ spec: logging: # can be opensearch, loki, customOutput or none. 
With none, the logging module won't be installed type: loki + # configurations for the loki package + loki: + tsdbStartDate: "2024-11-20" # configurations for the minio-ha package minio: # the PVC size for each minio disk, 6 disks total From 5e219fd57bc253fa965d7e056867133e97ac4f5b Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 20 Nov 2024 15:35:29 +0100 Subject: [PATCH 100/160] feat(auth): bump to v0.4.0-rc.0 - bump Auth module to v0.4.0-rc.0 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 1e26eb4ae..a38114e79 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -4,7 +4,7 @@ version: v1.30.0 modules: - auth: v0.3.0 + auth: v0.4.0.rc.0 aws: v4.2.1 dr: v3.0.0-rc.1 ingress: v3.0.1-rc.1 From fe3bfe79e19664bf9308de02afe4ae5c8cb08d4b Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 16:01:48 +0100 Subject: [PATCH 101/160] feat: update AWS module to v4.3.0 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 1e26eb4ae..7d1dccb54 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -5,7 +5,7 @@ version: v1.30.0 modules: auth: v0.3.0 - aws: v4.2.1 + aws: v4.3.0 dr: v3.0.0-rc.1 ingress: v3.0.1-rc.1 logging: v4.0.0-rc.0 From b443c4b95fb013cc906cdb9a6c71f57dabec7426 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 16:51:36 +0100 Subject: [PATCH 102/160] docs: WIP prepare main README.md for 1.30 --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 286838261..2e28aec31 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@

Kubernetes Fury Distribution (KFD) is a certified battle-tested Kubernetes distribution based purely on upstream Kubernetes.

-[![Build Status](http://ci.sighup.io/api/badges/sighupio/fury-distribution/status.svg?ref=refs/tags/v1.29.4)](http://ci.sighup.io/sighupio/fury-distribution) -[![Release](https://img.shields.io/badge/release-v1.29.4-blue?label=FuryDistributionRelease)](https://github.com/sighupio/fury-distribution/releases/latest) +[![Build Status](http://ci.sighup.io/api/badges/sighupio/fury-distribution/status.svg?ref=refs/tags/v1.30.0)](http://ci.sighup.io/sighupio/fury-distribution) +[![Release](https://img.shields.io/badge/release-v1.30.0-blue?label=FuryDistributionRelease)](https://github.com/sighupio/fury-distribution/releases/latest) [![Slack](https://img.shields.io/badge/slack-@kubernetes/fury-yellow.svg?logo=slack)](https://kubernetes.slack.com/archives/C0154HYTAQH) [![License](https://img.shields.io/github/license/sighupio/fury-distribution)](https://github.com/sighupio/fury-distribution/blob/main/LICENSE) @@ -130,9 +130,9 @@ Current supported versions of KFD are: | KFD Version | Kubernetes Version | | :----------------------------------------------------------------------------: | :----------------: | -| [`1.29.4`](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.4) | `1.29.x` | -| [`1.28.4`](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.4) | `1.28.x` | -| [`1.27.9`](https://github.com/sighupio/fury-distribution/releases/tag/v1.27.8) | `1.27.x` | +| [`1.30.0`](https://github.com/sighupio/fury-distribution/releases/tag/v1.30.0) | `1.30.x` | +| [`1.29.5`](https://github.com/sighupio/fury-distribution/releases/tag/v1.29.5) | `1.29.x` | +| [`1.28.5`](https://github.com/sighupio/fury-distribution/releases/tag/v1.28.5) | `1.28.x` | Check the [compatibility matrix][compatibility-matrix] for additional information about previous releases of the Distribution and the compatibility with `furyctl`. 
@@ -174,13 +174,13 @@ KFD is open-source software and it's released under the following [LICENSE](LICE [dr-module]: https://github.com/sighupio/fury-kubernetes-dr [opa-module]: https://github.com/sighupio/fury-kubernetes-opa [auth-module]: https://github.com/sighupio/fury-kubernetes-auth -[networking-version]: https://img.shields.io/badge/release-v1.17.0-blue -[ingress-version]: https://img.shields.io/badge/release-v2.3.3-blue -[logging-version]: https://img.shields.io/badge/release-v3.4.1-blue -[monitoring-version]: https://img.shields.io/badge/release-v3.2.0-blue -[tracing-version]: https://img.shields.io/badge/release-v1.0.3-blue -[dr-version]: https://img.shields.io/badge/release-v2.3.0-blue -[opa-version]: https://img.shields.io/badge/release-v1.12.0-blue +[networking-version]: https://img.shields.io/badge/release-v2.0.0-blue +[ingress-version]: https://img.shields.io/badge/release-v3.0.1-blue +[logging-version]: https://img.shields.io/badge/release-v4.0.0-blue +[monitoring-version]: https://img.shields.io/badge/release-v3.3.0-blue +[tracing-version]: https://img.shields.io/badge/release-v1.1.0-blue +[dr-version]: https://img.shields.io/badge/release-v3.0.0-blue +[opa-version]: https://img.shields.io/badge/release-v1.13.0-blue [auth-version]: https://img.shields.io/badge/release-v0.3.0-blue From 76e60caa43260bb5043123d82e6a3fd2024458f5 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 16:54:34 +0100 Subject: [PATCH 103/160] docs: update release note for v1.30.0 with target module upgrades --- docs/releases/v1.30.0.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index b8766c40e..8016520d6 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -10,26 +10,26 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i - [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) ๐Ÿ“ฆ installer: 
[**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6) - TBD -- [eks](https://github.com/sighupio/fury-eks-installer) ๐Ÿ“ฆ installer: [**v3.X.X**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.X.X) +- [eks](https://github.com/sighupio/fury-eks-installer) ๐Ÿ“ฆ installer: [**v3.2.0**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.2.0) - TBD ### Module updates -- [networking](https://github.com/sighupio/fury-kubernetes-networking) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/vX.X.X) +- [networking](https://github.com/sighupio/fury-kubernetes-networking) ๐Ÿ“ฆ core module: [**v2.0.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v2.0.0) - TBD -- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/vX.X.X) +- [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) ๐Ÿ“ฆ core module: [**v3.3.0**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v3.3.0) - TBD -- [logging](https://github.com/sighupio/fury-kubernetes-logging) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/vX.X.X) +- [logging](https://github.com/sighupio/fury-kubernetes-logging) ๐Ÿ“ฆ core module: [**v4.0.0**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v4.0.0) - TBD -- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/vX.X.X) +- [ingress](https://github.com/sighupio/fury-kubernetes-ingress) ๐Ÿ“ฆ core module: [**v3.0.1**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v3.0.1) - TBD -- [auth](https://github.com/sighupio/fury-kubernetes-auth) ๐Ÿ“ฆ core module: 
[**vX.X.X**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/vX.X.X) +- [auth](https://github.com/sighupio/fury-kubernetes-auth) ๐Ÿ“ฆ core module: [**v0.X.0**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.X.0) - TBD -- [dr](https://github.com/sighupio/fury-kubernetes-dr) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/vX.X.X) +- [dr](https://github.com/sighupio/fury-kubernetes-dr) ๐Ÿ“ฆ core module: [**v3.0.0**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v3.0.0) - TBD -- [tracing](https://github.com/sighupio/fury-kubernetes-tracing) ๐Ÿ“ฆ core module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/vX.X.X) +- [tracing](https://github.com/sighupio/fury-kubernetes-tracing) ๐Ÿ“ฆ core module: [**v1.1.0**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/v1.1.0) - TBD -- [aws](https://github.com/sighupio/fury-kubernetes-aws) ๐Ÿ“ฆ module: [**vX.X.X**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/vX.X.X) +- [aws](https://github.com/sighupio/fury-kubernetes-aws) ๐Ÿ“ฆ module: [**v4.3.0**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/v4.3.0) - TBD ## New features ๐ŸŒŸ From cb1292caa8e3764d2e5c4707f52091c044c17aaa Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 17:33:54 +0100 Subject: [PATCH 104/160] feat: update ingress to v3.0.1-rc.2 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 1ce16bc44..da0ac39ef 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -7,7 +7,7 @@ modules: auth: v0.4.0.rc.0 aws: v4.3.0 dr: v3.0.0-rc.1 - ingress: v3.0.1-rc.1 + ingress: v3.0.1-rc.2 logging: v4.0.0-rc.0 monitoring: v3.3.0-rc.1 opa: v1.13.0 From c78a81353ec7355a0cb0723901d88d7d9e6f5e8b Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 17:49:15 +0100 Subject: [PATCH 105/160] fix: auth version --- kfd.yaml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index da0ac39ef..6afa5942f 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -4,7 +4,7 @@ version: v1.30.0 modules: - auth: v0.4.0.rc.0 + auth: v0.4.0-rc.0 aws: v4.3.0 dr: v3.0.0-rc.1 ingress: v3.0.1-rc.2 From 3e11594a5816c6d1c6ba7b4629112bf0696f81ef Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 20 Nov 2024 18:05:22 +0100 Subject: [PATCH 106/160] feat: bump networking to v2.0.0-rc.2 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 6afa5942f..ca28f80cb 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -11,7 +11,7 @@ modules: logging: v4.0.0-rc.0 monitoring: v3.3.0-rc.1 opa: v1.13.0 - networking: v2.0.0-rc.1 + networking: v2.0.0-rc.2 tracing: v1.1.0 kubernetes: eks: From ef0983f765f8b326d24c3adf350b121e52d61e7e Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 21 Nov 2024 12:21:07 +0100 Subject: [PATCH 107/160] docs(network-policies): update READMEs and diagrams --- docs/network-policies/modules/auth/README.md | 12 +----------- docs/network-policies/modules/ingress/README.md | 3 ++- docs/network-policies/modules/ingress/single.md | 2 ++ docs/network-policies/modules/logging/README.md | 6 ++---- docs/network-policies/modules/logging/loki.md | 6 +++--- docs/network-policies/modules/logging/opensearch.md | 2 +- 6 files changed, 11 insertions(+), 20 deletions(-) diff --git a/docs/network-policies/modules/auth/README.md b/docs/network-policies/modules/auth/README.md index 90a2f5d10..488d84b6b 100644 --- a/docs/network-policies/modules/auth/README.md +++ b/docs/network-policies/modules/auth/README.md @@ -10,17 +10,7 @@ - deny-all - all-egress-kube-dns - pomerium-ingress-nginx -- pomerium-egress-https -- pomerium-egress-grafana -- pomerium-egress-prometheus -- pomerium-egress-alert-manager -- pomerium-egress-forecastle -- pomerium-egress-gpm -- pomerium-egress-hubble-ui -- pomerium-egress-opensearch-dashboard -- pomerium-egress-minio-logging -- 
pomerium-egress-minio-tracing -- pomerium-ingress-prometheus-metrics +- pomerium-egress-all ## Configurations - [SSO with Pomerium](sso.md) diff --git a/docs/network-policies/modules/ingress/README.md b/docs/network-policies/modules/ingress/README.md index b609e816a..23eb467df 100644 --- a/docs/network-policies/modules/ingress/README.md +++ b/docs/network-policies/modules/ingress/README.md @@ -27,7 +27,8 @@ - forecastle-egress-kube-apiserver - nginx-egress-all - all-ingress-nginx -- nginx-ingress-prometheus-metrics +- nginx-ingress-prometheus-metric +- external-dns ## Configurations - [Single Nginx](single.md) diff --git a/docs/network-policies/modules/ingress/single.md b/docs/network-policies/modules/ingress/single.md index 1fb318341..b0f7b2054 100644 --- a/docs/network-policies/modules/ingress/single.md +++ b/docs/network-policies/modules/ingress/single.md @@ -6,6 +6,7 @@ graph TD subgraph ingress-nginx nginx[Nginx Controller
app: ingress-nginx] fc[Forecastle
app: forecastle] + edns[ExternalDNS
app: external-dns] end subgraph cert-manager @@ -30,4 +31,5 @@ graph TD all[All Namespaces] -->|"8080,8443,9443/TCP"| nginx nginx -->|"egress: all"| all nginx -->|"3000/TCP"| fc + edns --> |"egress: all"| ext ``` \ No newline at end of file diff --git a/docs/network-policies/modules/logging/README.md b/docs/network-policies/modules/logging/README.md index b9bed7296..41fb930d9 100644 --- a/docs/network-policies/modules/logging/README.md +++ b/docs/network-policies/modules/logging/README.md @@ -13,6 +13,7 @@ - deny-all - all-egress-kube-dns - event-tailer-egress-kube-apiserver +- fluentd-egress-all - fluentbit-egress-fluentd - fluentbit-egress-kube-apiserver - fluentbit-ingress-prometheus-metrics @@ -21,8 +22,6 @@ ### OpenSearch Stack - fluentd-ingress-fluentbit - fluentd-ingress-prometheus-metrics -- fluentd-egress-minio -- fluentd-egress-opensearch - opensearch-discovery - opensearch-ingress-dashboards - opensearch-ingress-fluentd @@ -34,12 +33,11 @@ - jobs-egress-opensearch ### Loki Stack -- fluentd-egress-loki - loki-distributed-ingress-fluentd - loki-distributed-ingress-grafana - loki-distributed-ingress-prometheus-metrics - loki-distributed-discovery -- loki-distributed-egress-minio +- loki-distributed-egress-all ### MinIO - minio-ingress-namespace diff --git a/docs/network-policies/modules/logging/loki.md b/docs/network-policies/modules/logging/loki.md index f7f80c12e..1b87f37ce 100644 --- a/docs/network-policies/modules/logging/loki.md +++ b/docs/network-policies/modules/logging/loki.md @@ -42,9 +42,9 @@ graph TD loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester loki_querier -->|"loki-discovery
9095,7946,3100/TCP"| loki_query_frontend loki_compactor -->|"loki-discovery
9095,7946,3100/TCP"| loki_ingester - loki_compactor -->|"9000/TCP"| minio - loki_ingester -->|"9000/TCP"| minio - loki_querier -->|"9000/TCP"| minio + loki_compactor -->|"egress: all"| minio + loki_ingester -->|"egress: all"| minio + loki_querier -->|"egress: all"| minio bucket -->|"9000/TCP"| minio minio -->|"443/TCP"| ext pom -->|"9001/TCP"| minio diff --git a/docs/network-policies/modules/logging/opensearch.md b/docs/network-policies/modules/logging/opensearch.md index bd7f3d9c4..5cf5727eb 100644 --- a/docs/network-policies/modules/logging/opensearch.md +++ b/docs/network-policies/modules/logging/opensearch.md @@ -30,7 +30,7 @@ graph TD op -->|"6443/TCP"| api bucket -->|"6443/TCP"| api fb -->|"24240/TCP"| fd - fd -->|"9200/TCP"| os + fd -->|"egress: all"| os osd -->|"9200/TCP"| os pom -->|"5601/TCP"| osd job -->|"5601/TCP"| osd From b3798f1406b2d27f661fc57f57d0369f0469561a Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 21 Nov 2024 12:25:13 +0100 Subject: [PATCH 108/160] feat(network-policies): add external-dns egress policy --- .../ingress-nginx/external-dns.yaml.tpl | 22 +++++++++++++++++++ .../ingress-nginx/kustomization.yaml.tpl | 1 + 2 files changed, 23 insertions(+) create mode 100644 templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl new file mode 100644 index 000000000..3bd02356e --- /dev/null +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/external-dns.yaml.tpl @@ -0,0 +1,22 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: external-dns-egress-all + namespace: ingress-nginx + labels: + cluster.kfd.sighup.io/module: ingress + cluster.kfd.sighup.io/ingress-type: nginx +spec: + podSelector: + matchLabels: + app: external-dns + policyTypes: + - Egress + egress: + - {} +--- diff --git a/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl index fba4b8119..46494b30e 100644 --- a/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl +++ b/templates/distribution/manifests/ingress/policies/ingress-nginx/kustomization.yaml.tpl @@ -11,3 +11,4 @@ resources: - forecastle.yaml - nginx-ingress-controller.yaml - prometheus-metrics.yaml + - external-dns.yaml From 4967b9c93c18f58f1be5917d3968c3038a7fdc4f Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 21 Nov 2024 12:27:03 +0100 Subject: [PATCH 109/160] chore(network-policies): add copyright notice to tracing policies --- .../manifests/tracing/policies/minio.yaml.tpl | 5 ++ .../manifests/tracing/policies/tempo.yaml.tpl | 53 ++++++++++--------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl index 5089cd95b..9e4244d78 100644 --- a/templates/distribution/manifests/tracing/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/minio.yaml.tpl @@ -1,3 +1,8 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +--- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: diff --git a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl index 01e8f0f43..09528ec3d 100644 --- a/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl +++ b/templates/distribution/manifests/tracing/policies/tempo.yaml.tpl @@ -1,3 +1,8 @@ +# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +--- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -120,20 +125,20 @@ metadata: labels: cluster.kfd.sighup.io/module: tracing spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app.kubernetes.io/instance: tempo-distributed - egress: - - to: - - podSelector: - matchLabels: - app.kubernetes.io/name: tempo - app.kubernetes.io/component: memcached - ports: - - port: 11211 - protocol: TCP + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/instance: tempo-distributed + egress: + - to: + - podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/component: memcached + ports: + - port: 11211 + protocol: TCP --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -192,16 +197,16 @@ metadata: labels: cluster.kfd.sighup.io/module: tracing spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app.kubernetes.io/name: tempo - app.kubernetes.io/instance: tempo-distributed - egress: - - ports: - - port: 443 - protocol: TCP + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: tempo + app.kubernetes.io/instance: tempo-distributed + egress: + - ports: + - port: 443 + protocol: TCP {{- if eq .spec.distribution.modules.tracing.tempo.backend "minio" }} --- apiVersion: networking.k8s.io/v1 From 48e5ca648b9924809b7ba78ae923575f37f87960 Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Thu, 21 Nov 
2024 12:28:03 +0100 Subject: [PATCH 110/160] fix(network-policies): logging,auth --- .../manifests/auth/policies/pomerium.yaml.tpl | 244 +----------------- .../logging/policies/fluentd.yaml.tpl | 83 +----- .../manifests/logging/policies/loki.yaml.tpl | 14 +- 3 files changed, 17 insertions(+), 324 deletions(-) diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index 626cb2946..c82c7fc4f 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -32,52 +32,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: pomerium-egress-https - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - ports: - - port: 443 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-grafana - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: monitoring - podSelector: - matchLabels: - app.kubernetes.io/component: grafana - ports: - - port: 3000 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-prometheus + name: pomerium-egress-all namespace: pomerium labels: cluster.kfd.sighup.io/module: auth @@ -89,200 +44,5 @@ spec: matchLabels: app: pomerium egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: monitoring - podSelector: - matchLabels: - app.kubernetes.io/name: prometheus - ports: - - port: 9090 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 
-kind: NetworkPolicy -metadata: - name: pomerium-egress-alert-manager - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: monitoring - podSelector: - matchLabels: - alertmanager: main - ports: - - port: 9093 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-forecastle - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: ingress-nginx - podSelector: - matchLabels: - app: forecastle - ports: - - port: 3000 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-gpm - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: gatekeeper-system - podSelector: - matchLabels: - app: gatekeeper-policy-manager - ports: - - port: 8080 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-hubble-ui - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: kube-system - podSelector: - matchLabels: - app.kubernetes.io/name: hubble-ui - ports: - - port: 8081 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 
-kind: NetworkPolicy -metadata: - name: pomerium-egress-opensearch-dashboard - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso - cluster.kfd.sighup.io/logging-type: opensearch -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: logging - podSelector: - matchLabels: - app: opensearch-dashboards - ports: - - port: 9200 - protocol: TCP - - port: 5601 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-minio-logging - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso - cluster.kfd.sighup.io/logging-backend: minio -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: logging - podSelector: - matchLabels: - app: minio - ports: - - port: 9001 - protocol: TCP ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: pomerium-egress-minio-tracing - namespace: pomerium - labels: - cluster.kfd.sighup.io/module: auth - cluster.kfd.sighup.io/auth-provider-type: sso -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app: pomerium - egress: - - to: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: tracing - podSelector: - matchLabels: - app: minio - ports: - - port: 9001 - protocol: TCP + - {} --- diff --git a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl index 48bfd6a13..95adfac59 100644 --- a/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/fluentd.yaml.tpl @@ -6,52 +6,43 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: 
fluentd-ingress-fluentbit + name: fluentd-egress-all namespace: logging labels: cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - - Ingress + - Egress podSelector: matchLabels: app.kubernetes.io/name: fluentd - ingress: - - from: - - podSelector: - matchLabels: - app.kubernetes.io/name: fluentbit - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: logging - ports: - - port: 24240 - protocol: TCP + egress: + - {} --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: fluentd-egress-minio + name: fluentd-ingress-fluentbit namespace: logging labels: cluster.kfd.sighup.io/module: logging - cluster.kfd.sighup.io/logging-backend: minio spec: policyTypes: - - Egress + - Ingress podSelector: matchLabels: app.kubernetes.io/name: fluentd - egress: - - to: + ingress: + - from: - podSelector: matchLabels: - app: minio + app.kubernetes.io/name: fluentbit namespaceSelector: matchLabels: kubernetes.io/metadata.name: logging ports: - - port: 9000 + - port: 24240 protocol: TCP --- apiVersion: networking.k8s.io/v1 @@ -79,54 +70,4 @@ spec: - port: 24231 protocol: TCP --- -{{- if eq .spec.distribution.modules.logging.type "opensearch" }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: fluentd-egress-opensearch - namespace: logging - labels: - cluster.kfd.sighup.io/module: logging - cluster.kfd.sighup.io/logging-type: opensearch -spec: - policyTypes: - - Egress - podSelector: - matchLabels: - app.kubernetes.io/name: fluentd - egress: - - to: - - podSelector: - matchLabels: - app.kubernetes.io/name: opensearch - ports: - - port: 9200 - protocol: TCP ---- -{{- end }} -{{- if eq .spec.distribution.modules.logging.type "loki" }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: fluentd-egress-loki - namespace: logging - labels: - cluster.kfd.sighup.io/module: logging - cluster.kfd.sighup.io/logging-type: loki -spec: - policyTypes: - - Egress - 
podSelector: - matchLabels: - app.kubernetes.io/name: fluentd - egress: - - to: - - podSelector: - matchLabels: - app.kubernetes.io/name: loki-distributed - app.kubernetes.io/component: gateway - ports: - - port: 8080 - protocol: TCP ---- -{{- end }} + diff --git a/templates/distribution/manifests/logging/policies/loki.yaml.tpl b/templates/distribution/manifests/logging/policies/loki.yaml.tpl index 0afe2222c..7bae584c3 100644 --- a/templates/distribution/manifests/logging/policies/loki.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/loki.yaml.tpl @@ -134,7 +134,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: loki-distributed-egress-minio + name: loki-distributed-egress-all namespace: logging labels: cluster.kfd.sighup.io/module: logging @@ -146,13 +146,5 @@ spec: matchLabels: app.kubernetes.io/name: loki-distributed egress: - - to: - - podSelector: - matchLabels: - app: minio - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: logging - ports: - - port: 9000 - protocol: TCP + - {} +--- From e995f6eef68d837d27ea77a8dfb374631b001179 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Fri, 22 Nov 2024 12:49:51 +0100 Subject: [PATCH 111/160] fix(schemas/eks): improvements for AMI fields - Improve description fields for EKS NodePools AMI configuration - Improve dependencies definitions in schema, so mutually exclusive options are considered. 
--- .../ekscluster/v1alpha2/private/schema.go | 2585 +++++++++-------- pkg/apis/ekscluster/v1alpha2/public/schema.go | 2122 +++++++------- schemas/private/ekscluster-kfd-v1alpha2.json | 60 +- schemas/public/ekscluster-kfd-v1alpha2.json | 60 +- 4 files changed, 2512 insertions(+), 2315 deletions(-) diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index c4692c2e9..449c29b0b 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -1482,6 +1482,10 @@ type SpecKubernetes struct { // using the ec2-user user NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` + // Global default AMI type used for EKS worker nodes. This will apply to all node + // pools unless overridden by a specific node pool. + NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` + // NodePools corresponds to the JSON schema field "nodePools". NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` @@ -1590,7 +1594,7 @@ type SpecKubernetesNodePool struct { // Kubernetes labels that will be added to the nodes Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - // The name of the node pool + // The name of the node pool. Name string `json:"name" yaml:"name" mapstructure:"name"` // Size corresponds to the JSON schema field "size". @@ -1605,8 +1609,10 @@ type SpecKubernetesNodePool struct { // Kubernetes taints that will be added to the nodes Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - // Type corresponds to the JSON schema field "type". 
- Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + // The type of Node Pool, can be `self-managed` for using a custom AMI or + // `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It + // is reccomended to use `self-managed` with an `ami.type`. + Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { @@ -1711,14 +1717,36 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct { SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` } +// Configuration for using custom a Amazon Machine Image (AMI) for the machines of +// the Node Pool. +// +// The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields +// for using a custom AMI or by setting the `ami.type` field to one of the official +// AMIs based on Amazon Linux. type SpecKubernetesNodePoolAmi struct { - // The AMI ID to use for the nodes - Id string `json:"id" yaml:"id" mapstructure:"id"` + // The ID of the AMI to use for the nodes, must be set toghether with the `owner` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. + Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` - // The owner of the AMI - Owner string `json:"owner" yaml:"owner" mapstructure:"owner"` + // The owner of the AMI to use for the nodes, must be set toghether with the `id` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. 
+ Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` + + // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type + // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at + // the same time than `ami.id` and `ami.owner`. + Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } +type SpecKubernetesNodePoolAmiType string + +const ( + SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" + SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" +) + type SpecKubernetesNodePoolContainerRuntime string const ( @@ -1726,6 +1754,13 @@ const ( SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" ) +type SpecKubernetesNodePoolGlobalAmiType string + +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + type SpecKubernetesNodePoolInstance struct { // MaxPods corresponds to the JSON schema field "maxPods". 
MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` @@ -1924,189 +1959,229 @@ type TypesAwsSubnetId string type TypesAwsTags map[string]string -type TypesAwsVpcId string - -type TypesCidr string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPublic(plain) + return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". 
- IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) + return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + } + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesTracing(plain) + return nil } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" 
yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") + } + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") + } + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) + return nil } -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesIpAddress string - -type TypesKubeLabels map[string]string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeNodeSelector map[string]string - -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". 
- Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistribution(plain) + return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type TypesCidr string - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil } -type TypesKubeTaints []string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + return nil +} -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -type TypesKubeTolerationEffect string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" -) - -type TypesKubeTolerationEffect_1 string - -const ( - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" -) - -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - -type TypesKubeTolerationOperator_1 string - -const ( - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesTcpPort int - -type TypesUri string - -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + } + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetwork(plain) + return nil } var enumValues_SpecDistributionModulesDrType = []interface{}{ @@ -2114,1108 +2189,1178 @@ var enumValues_SpecDistributionModulesDrType = []interface{}{ "eks", } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") + } + type Plain SpecInfrastructureVpc + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpc(plain) + return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ "minio", "externalEndpoint", } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} +type TypesTcpPort int -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { + return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") + } + if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { + return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") + } + if v, ok := raw["loadBalancerController"]; !ok || v == nil { + return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") + } + if v, ok := raw["overrides"]; !ok || v == nil { + return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + } + type Plain SpecDistributionModulesAws + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAws(plain) + return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +// 
UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + } + type Plain SpecInfrastructureVpnSsh + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) + return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} +type TypesAwsVpcId string -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", -} +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} + // The node selector to use to place the pods for the dr module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + 
Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + type Plain SpecKubernetesAPIServer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAPIServer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistributionModulesAwsLoadBalancerController var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesAwsLoadBalancerController(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + if v, ok := raw["username"]; !ok || v == 
nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAwsAuthRole(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + type Plain SpecDistributionModulesAwsEbsCsiDriver + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + *j = SpecKubernetesAwsAuthUser(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + } + type Plain SpecDistributionModulesAwsClusterAutoscaler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + *j = SpecDistributionModulesPolicy(plain) + return nil +} + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + } + *j = SpecKubernetesLogsTypesElem(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesAuthDex + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecDistributionModulesPolicyKyverno var plain Plain 
if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + 
+var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecDistributionModulesAuthDex(plain) return nil } +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesAuth + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - 
"controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAwsClusterAutoscaler - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecKubernetesAwsAuthUser + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAwsEbsCsiDriver - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesAwsAuthRole - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecDistributionModulesNetworkingType(v) return nil } +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecKubernetesNodePoolInstance + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecDistributionModulesAwsLoadBalancerController - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - *j = SpecDistributionModulesAwsLoadBalancerController(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecKubernetesAPIServer + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecKubernetesNodePoolSize + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") - } - type Plain SpecInfrastructureVpn + type Plain SpecKubernetesNodePoolAdditionalFirewallRules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - type Plain SpecInfrastructureVpnSsh - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { - return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") - } - if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { - return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") - } - if v, ok := raw["loadBalancerController"]; !ok || v == nil { - return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") - } - if v, ok := raw["overrides"]; !ok || v == nil { - return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - type Plain SpecDistributionModulesAws + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAws(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecKubernetesNodePoolAmiType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesDr(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") - } - type Plain SpecInfrastructureVpcNetwork - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["hostedZoneId"]; !ok || v == nil { + return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - *j = SpecInfrastructureVpcNetwork(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecDistributionModulesIngressClusterIssuerRoute53 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") - } - type Plain SpecKubernetesNodePool - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePool(plain) - return nil +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) - return nil +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistribution(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["route53"]; !ok || v == nil { + return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecDistributionModules + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - 
"us-gov-west-1", - "us-west-1", - "us-west-2", +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") - } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetes - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecKubernetes(plain) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + if v, ok := raw["vpcId"]; !ok || v == nil { + return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecPluginsHelmReleasesElemSetElem + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecPluginsHelmReleasesElemSetElem(plain) - return nil -} - -// 
UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) - } - *j = TypesAwsRegion(v) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } @@ -3239,1008 +3384,964 @@ func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") } - *j = SpecDistributionModulesTracingTempoBackend(v) + type Plain SpecDistributionModulesIngressExternalDNS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressExternalDNS(plain) return nil } -var 
enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } +type TypesKubeLabels_1 map[string]string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecToolsConfigurationTerraformState + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } +type TypesKubeTaints []string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") - } - type Plain SpecToolsConfigurationTerraform - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfigurationTerraform(plain) - return nil +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecKubernetesNodePoolType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") - } - type Plain SpecToolsConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfiguration(plain) - return nil +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *Spec) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") - } - type Plain Spec - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = Spec(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return 
nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") } - type Plain TypesKubeToleration + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecKubernetesNodePool(plain) return nil } +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) - } - *j = TypesKubeTolerationOperator(v) + *j = SpecDistributionModulesIngressNginx(plain) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["certManager"]; !ok || v == nil { + return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["externalDns"]; !ok || v == nil { + return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") } - type Plain SpecDistributionModulesDrVeleroEks + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecDistributionModulesIngress(plain) return nil } +type TypesKubeLabels map[string]string + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + } + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") + } + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + } + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetes(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["hostedZoneId"]; !ok || v == nil { - return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) - return nil +type TypesKubeResourcesLimits struct { + // The cpu limit for the opensearch pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for 
the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["route53"]; !ok || v == nil { - return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecToolsConfigurationTerraformStateS3 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesLoggingType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["vpcId"]; !ok || v == nil { - return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + type Plain SpecToolsConfigurationTerraformState var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecToolsConfigurationTerraformState(plain) return nil } +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecToolsConfigurationTerraform(plain) return nil } -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) - } - *j = TypesKubeTolerationOperator_1(v) - return nil +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") - } - if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesIngressExternalDNS + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressExternalDNS(plain) + *j = SpecToolsConfiguration(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") } - type Plain 
SpecDistributionModulesIngressNginxTLSSecret + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") + return fmt.Errorf("field effect in TypesKubeToleration: required") } if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + return fmt.Errorf("field key in TypesKubeToleration: required") } - type Plain TypesKubeToleration_1 + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = TypesKubeToleration(plain) return nil } +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". 
+ Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = TypesKubeTolerationOperator(v) return nil } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", } +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + 
TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" +) + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeTolerationEffect_1 string + +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = TypesKubeTolerationEffect_1(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil +const ( + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" +) + +type TypesKubeTolerationOperator_1 string + +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["certManager"]; !ok || v == nil { - return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") - } - if v, ok := raw["externalDns"]; !ok || v == nil { - return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionModulesIngress(plain) + *j = TypesKubeTolerationOperator_1(v) return nil } +const ( + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["value"]; !ok || v 
== nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = TypesKubeToleration_1(plain) return nil } +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +type TypesKubeTolerationEffect string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") - } - type Plain SpecDistributionModulesLoggingLoki - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingLoki(plain) - return nil -} +type TypesEnvRef string + +type TypesFileRef string + +type TypesIpAddress string + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesUri string // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { @@ -4260,24 +4361,8 @@ func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) - } - *j = SpecDistributionModulesLoggingType(v) - return nil +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", } // UnmarshalJSON implements json.Unmarshaler. @@ -4300,25 +4385,7 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil -} +type TypesKubeNodeSelector map[string]string // UnmarshalJSON implements json.Unmarshaler. func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index b2edf0592..4ae91eee9 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -1435,6 +1435,10 @@ type SpecKubernetes struct { // using the ec2-user user NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` + // Global default AMI type used for EKS worker nodes. 
This will apply to all node + // pools unless overridden by a specific node pool. + NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` + // NodePools corresponds to the JSON schema field "nodePools". NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` @@ -1543,7 +1547,7 @@ type SpecKubernetesNodePool struct { // Kubernetes labels that will be added to the nodes Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - // The name of the node pool + // The name of the node pool. Name string `json:"name" yaml:"name" mapstructure:"name"` // Size corresponds to the JSON schema field "size". @@ -1558,8 +1562,10 @@ type SpecKubernetesNodePool struct { // Kubernetes taints that will be added to the nodes Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - // Type corresponds to the JSON schema field "type". - Type *SpecKubernetesNodePoolType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + // The type of Node Pool, can be `self-managed` for using a custom AMI or + // `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It + // is reccomended to use `self-managed` with an `ami.type`. + Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { @@ -1646,1645 +1652,1533 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) - return nil -} +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" +) -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} +type SpecKubernetesNodePoolAdditionalFirewallRules struct { + // The CIDR blocks for the FW rule. At the moment the first item of the list will + // be used, others will be ignored. + CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil -} + // Self corresponds to the JSON schema field "self". + Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil + // SourceSecurityGroupId corresponds to the JSON schema field + // "sourceSecurityGroupId". + SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil +// Configuration for using custom a Amazon Machine Image (AMI) for the machines of +// the Node Pool. +// +// The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields +// for using a custom AMI or by setting the `ami.type` field to one of the official +// AMIs based on Amazon Linux. +type SpecKubernetesNodePoolAmi struct { + // The ID of the AMI to use for the nodes, must be set toghether with the `owner` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. + Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` + + // The owner of the AMI to use for the nodes, must be set toghether with the `id` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. + Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` + + // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type + // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at + // the same time than `ami.id` and `ami.owner`. 
+ Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", +type SpecKubernetesNodePoolAmiType string + +const ( + SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" + SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" +) + +type SpecKubernetesNodePoolContainerRuntime string + +const ( + SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" + SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" +) + +type SpecKubernetesNodePoolGlobalAmiType string + +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + +type SpecKubernetesNodePoolInstance struct { + // MaxPods corresponds to the JSON schema field "maxPods". + MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` + + // If true, the nodes will be created as spot instances + Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` + + // The instance type to use for the nodes + Type string `json:"type" yaml:"type" mapstructure:"type"` + + // The size of the disk in GB + VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` + + // VolumeType corresponds to the JSON schema field "volumeType". + VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") - } - type Plain SpecDistributionModulesLoggingLoki - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingLoki(plain) - return nil +type SpecKubernetesNodePoolInstanceVolumeType string + +const ( + SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" + SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" + SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" + SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" +) + +type SpecKubernetesNodePoolSize struct { + // The maximum number of nodes in the node pool + Max int `json:"max" yaml:"max" mapstructure:"max"` + + // The minimum number of nodes in the node pool + Min int `json:"min" yaml:"min" mapstructure:"min"` } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` +type SpecKubernetesNodePoolType string - // Requests corresponds to the JSON schema field "requests". 
- Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +const ( + SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" + SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" +) + +type SpecKubernetesNodePoolsLaunchKind string + +const ( + SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" + SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" + SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" +) + +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` + + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + + // Repositories corresponds to the JSON schema field "repositories". + Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) - } - *j = SpecDistributionModulesMonitoringMimirBackend(v) - return nil +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` + + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". 
+ Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) - } - *j = SpecDistributionModulesLoggingLokiBackend(v) - return nil +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` + + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecToolsConfiguration struct { + // Terraform corresponds to the JSON schema field "terraform". + Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") - } - type Plain SpecDistributionModulesLoggingCustomOutputs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) - return nil +type SpecToolsConfigurationTerraform struct { + // State corresponds to the JSON schema field "state". + State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") - } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngress(plain) - return nil +type SpecToolsConfigurationTerraformState struct { + // S3 corresponds to the JSON schema field "s3". + S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil +type SpecToolsConfigurationTerraformStateS3 struct { + // This value defines which bucket will be used to store all the states + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // This value defines which folder will be used to store all the states inside the + // bucket + KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` + + // This value defines in which region the bucket is located + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This value defines if the region of the bucket should be 
validated or not by + // Terraform, useful when using a bucket in a recently added region + SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } +type TypesAwsArn string + +type TypesAwsIamRoleName string + +type TypesAwsIamRoleNamePrefix string + +type TypesAwsIpProtocol string + +type TypesAwsRegion string + +const ( + TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" +) + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ +var enumValues_SpecDistributionModulesTracingType = []interface{}{ "none", - "single", - "dual", + "tempo", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesTracingType(v) return nil } +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) - return nil -} - -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) - } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesPolicy(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionModulesPolicyType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesTracing(plain) return nil } +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - type Plain SpecDistributionModulesIngressDNSPrivate - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - *j = SpecDistributionModulesIngressDNSPrivate(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModules(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecDistributionModulesPolicyKyverno var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistribution(plain) return nil } +type TypesCidr string + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + } + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) - } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) - return nil +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = 
SpecDistributionModulesDrVeleroEks(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } -const ( - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" -) - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecInfrastructureVpc(plain) return nil } -const TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" +type TypesAwsS3BucketNamePrefix string -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} +type TypesTcpPort int // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -const ( - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecDistributionModulesPolicyKyverno + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyKyverno(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } -const TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" +type TypesAwsVpcId string -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") } - *j = SpecDistributionModulesPolicyType(v) + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) return nil } -const ( - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } -const ( - TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" - TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" - TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" -) - -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } -const ( - TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" - TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" - TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" - TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" - TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" -) +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + } + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesAPIServer(plain) + return nil +} -var enumValues_SpecDistributionModulesTracingType = []interface{}{ +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ "none", - "tempo", + "prometheus", + "prometheusAgent", + "mimir", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -const ( - TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" - TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" - TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - type Plain SpecDistributionModulesTracing + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } -const TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") } - type Plain SpecDistributionModules + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } -const TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistribution) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistribution + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistribution(plain) + *j = SpecDistributionModulesLogging(plain) return nil } -type TypesCidr string - -const TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecDistributionModulesLoggingType(v) return nil } -const 
TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpcNetwork - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecKubernetesLogsTypesElem(v) return nil } -const TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -type TypesAwsS3BucketNamePrefix string - -type TypesTcpPort int +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} -const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecInfrastructureVpnSsh + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } -type TypesAwsVpcId string - -const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". 
+ Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The cpu request for the prometheus pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpn - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecInfrastructureVpn(plain) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } +type TypesKubeResourcesLimits struct { + // The cpu limit for the opensearch pods + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the opensearch pods + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + // 
UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesAwsRegion { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = TypesAwsRegion(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecKubernetesAPIServer + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } -type TypesAwsArn string - -type TypesAwsRegion string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecKubernetesAwsAuthRole + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = 
SpecKubernetesAwsAuthRole(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } -type TypesAwsS3BucketName string +type TypesAwsTags map[string]string // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecDistributionModulesIngress(plain) return nil } +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -type TypesAwsIamRoleNamePrefix string - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" 
mapstructure:"ingressClass,omitempty"` -} - -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type TypesAwsIamRoleName string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - *j = SpecDistributionModulesAuth(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } -type TypesAwsIpProtocol string - -type TypesAwsTags map[string]string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ "ingress", "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - *j = SpecDistributionModulesAuthProviderType(v) + if v, ok := raw["key"]; !ok || v == nil { + return 
fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = 
SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ "ingress", "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecDistributionModulesAuthDex + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + return nil +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecKubernetesNodePoolAmiType(v) return nil } -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" -) - -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecDistributionModulesDr(plain) return nil } -type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. - CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` - - // Self corresponds to the JSON schema field "self". 
- Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` - - // SourceSecurityGroupId corresponds to the JSON schema field - // "sourceSecurityGroupId". - SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + } + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } -type SpecKubernetesNodePoolAmi struct { - // The AMI ID to use for the nodes - Id string `json:"id" yaml:"id" mapstructure:"id"` - - // The owner of the AMI - Owner string `json:"owner" yaml:"owner" mapstructure:"owner"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAmi) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["id"]; !ok || v == nil { - return fmt.Errorf("field id in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - if v, ok := raw["owner"]; !ok || v == nil { - return fmt.Errorf("field owner in SpecKubernetesNodePoolAmi: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecKubernetesNodePoolAmi + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAmi(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } -type SpecKubernetesNodePoolContainerRuntime string +const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ "docker", @@ -3312,12 +3206,11 @@ func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { } const ( - SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" - SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" ) -type SpecKubernetesNodePoolInstanceVolumeType string - var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ "gp2", "gp3", @@ -3326,47 +3219,81 @@ var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } const ( - SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" - SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" - SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" - SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" ) -type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". 
- MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - - // If true, the nodes will be created as spot instances - Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - - // The instance type to use for the nodes - Type string `json:"type" yaml:"type" mapstructure:"type"` - - // The size of the disk in GB - VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) + return nil +} - // VolumeType corresponds to the JSON schema field "volumeType". - VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", } // UnmarshalJSON implements json.Unmarshaler. 
@@ -3387,15 +3314,9 @@ func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeLabels_1 map[string]string - -type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool - Max int `json:"max" yaml:"max" mapstructure:"max"` +type TypesAwsS3BucketName string - // The minimum number of nodes in the node pool - Min int `json:"min" yaml:"min" mapstructure:"min"` -} +type TypesKubeLabels_1 map[string]string // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { @@ -3422,7 +3343,25 @@ type TypesAwsSubnetId string type TypesKubeTaints []string -type SpecKubernetesNodePoolType string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} var enumValues_SpecKubernetesNodePoolType = []interface{}{ "eks-managed", @@ -3449,89 +3388,157 @@ func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { return nil } -const ( - SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" - SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" -) +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // The node selector to use to place the pods for the dr module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the monitoring module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") + } + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePool(plain) + return nil +} + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The host of the ingress + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // The ingress class of the ingress + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" 
mapstructure:"ingressClass,omitempty"` +} + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + } + *j = SpecKubernetesNodePoolsLaunchKind(v) + return nil +} + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecKubernetesNodePool + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } -type SpecKubernetesNodePoolsLaunchKind string - -var enumValues_SpecKubernetesNodePoolsLaunchKind = 
[]interface{}{ - "launch_configurations", - "launch_templates", - "both", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -const ( - SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" - SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" - SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" -) - -type TypesKubeLabels map[string]string - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -3559,12 +3566,10 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { return nil } -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } // UnmarshalJSON implements json.Unmarshaler. 
@@ -3588,78 +3593,112 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { return nil } -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // Disable running `helm diff` validation when installing the plugin, it will - // still be done when upgrading. - DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". - Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil } -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil } -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". 
- Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil } -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` +type TypesFuryModuleComponentOverrides struct { + // The node selector to use to place the pods for the minio module + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` + // The tolerations that will be added to the pods for the cert-manager module + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil } type TypesAwsS3KeyPrefix string -type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states - BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - - // This value defines which folder will be used to store all the states inside the - // bucket - KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - - // This value defines in which region the bucket is located - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` - - // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region - SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -3686,9 +3725,10 @@ func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { return nil } -type SpecToolsConfigurationTerraformState struct { - // S3 corresponds to the JSON schema field "s3". - S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } // UnmarshalJSON implements json.Unmarshaler. @@ -3709,9 +3749,22 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { return nil } -type SpecToolsConfigurationTerraform struct { - // State corresponds to the JSON schema field "state". - State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -3732,10 +3785,7 @@ func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { return nil } -type SpecToolsConfiguration struct { - // Terraform corresponds to the JSON schema field "terraform". - Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` -} +type TypesKubeLabels map[string]string // UnmarshalJSON implements json.Unmarshaler. func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index a425f728f..39a81844b 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -555,6 +555,7 @@ "additionalProperties": false, "properties": { "type": { + "description": "The type of Node Pool, can be `self-managed` for using a custom AMI or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is reccomended to use `self-managed` with an `ami.type`.", "type": "string", "enum": [ "eks-managed", @@ -563,7 +564,7 @@ }, "name": { "type": "string", - "description": "The name of the node pool" + "description": "The name of the node pool." 
}, "ami": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" @@ -649,30 +650,69 @@ "Spec.Kubernetes.NodePool.Ami": { "type": "object", "additionalProperties": false, + "description": "Configuration for using custom a Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", "properties": { "id": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "The ID of the AMI to use for the nodes, must be set toghether with the `owner` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`." }, "owner": { "type": "string", - "description": "The owner of the AMI" + "description": "The owner of the AMI to use for the nodes, must be set toghether with the `id` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`." }, "type": { "type": "string", - "description": "The AMI type based on OS", + "description": "The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of Node Pools. Only Amazon Linux based AMIs are supported. 
It can't be set at the same time than `ami.id` and `ami.owner`.", "enum": [ "alinux2", "alinux2023" ] } }, - "dependencies": { - "id": [ - "owner" - ] - }, - "required": [] + "oneOf": [ + { + "allOf": [ + { + "required": [ + "id", + "owner" + ] + }, + { + "not": { + "required": [ + "type" + ] + } + } + ] + }, + { + "allOf": [ + { + "required": [ + "type" + ] + }, + { + "not": { + "anyOf": [ + { + "required": [ + "id" + ] + }, + { + "required": [ + "owner" + ] + } + ] + } + } + ] + } + ] }, "Spec.Kubernetes.NodePool.Instance": { "type": "object", diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 6429beba2..8d482226b 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -555,6 +555,7 @@ "additionalProperties": false, "properties": { "type": { + "description": "The type of Node Pool, can be `self-managed` for using a custom AMI or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is reccomended to use `self-managed` with an `ami.type`.", "type": "string", "enum": [ "eks-managed", @@ -563,7 +564,7 @@ }, "name": { "type": "string", - "description": "The name of the node pool" + "description": "The name of the node pool." }, "ami": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" @@ -649,30 +650,69 @@ "Spec.Kubernetes.NodePool.Ami": { "type": "object", "additionalProperties": false, + "description": "Configuration for using custom a Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", "properties": { "id": { "type": "string", - "description": "The AMI ID to use for the nodes" + "description": "The ID of the AMI to use for the nodes, must be set toghether with the `owner` field. 
`ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`." }, "owner": { "type": "string", - "description": "The owner of the AMI" + "description": "The owner of the AMI to use for the nodes, must be set toghether with the `id` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`." }, "type": { "type": "string", - "description": "The AMI type based on OS", + "description": "The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time than `ami.id` and `ami.owner`.", "enum": [ "alinux2", "alinux2023" ] } }, - "dependencies": { - "id": [ - "owner" - ] - }, - "required": [] + "oneOf": [ + { + "allOf": [ + { + "required": [ + "id", + "owner" + ] + }, + { + "not": { + "required": [ + "type" + ] + } + } + ] + }, + { + "allOf": [ + { + "required": [ + "type" + ] + }, + { + "not": { + "anyOf": [ + { + "required": [ + "id" + ] + }, + { + "required": [ + "owner" + ] + } + ] + } + } + ] + } + ] }, "Spec.Kubernetes.NodePool.Instance": { "type": "object", From 3a18539e440e17d8bab8d6ea0bda706a913f0894 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 11:35:32 +0100 Subject: [PATCH 112/160] Apply suggestions from code review Co-authored-by: Giuseppe Iannelli <94362884+g-iannelli@users.noreply.github.com> --- schemas/private/ekscluster-kfd-v1alpha2.json | 4 ++-- schemas/public/ekscluster-kfd-v1alpha2.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 39a81844b..538188105 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -555,7 +555,7 @@ "additionalProperties": false, "properties": { "type": { - "description": 
"The type of Node Pool, can be `self-managed` for using a custom AMI or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is reccomended to use `self-managed` with an `ami.type`.", + "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.", "type": "string", "enum": [ "eks-managed", @@ -650,7 +650,7 @@ "Spec.Kubernetes.NodePool.Ami": { "type": "object", "additionalProperties": false, - "description": "Configuration for using custom a Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", + "description": "Configuration for customize the Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", "properties": { "id": { "type": "string", diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 8d482226b..7c0f91e64 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -555,7 +555,7 @@ "additionalProperties": false, "properties": { "type": { - "description": "The type of Node Pool, can be `self-managed` for using a custom AMI or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. 
It is reccomended to use `self-managed` with an `ami.type`.", + "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.", "type": "string", "enum": [ "eks-managed", @@ -650,7 +650,7 @@ "Spec.Kubernetes.NodePool.Ami": { "type": "object", "additionalProperties": false, - "description": "Configuration for using custom a Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", + "description": "Configuration for customize the Amazon Machine Image (AMI) for the machines of the Node Pool.\n\nThe AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux.", "properties": { "id": { "type": "string", From 5f8582d5caa95b59ba03272155a1c59c647f1575 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 12:13:26 +0100 Subject: [PATCH 113/160] chore: bump logging to v4.0.0-rc.1, update go models and docs --- docs/schemas/ekscluster-kfd-v1alpha2.md | 69 +++++++++++++------ kfd.yaml | 2 +- .../ekscluster/v1alpha2/private/schema.go | 12 ++-- pkg/apis/ekscluster/v1alpha2/public/schema.go | 12 ++-- 4 files changed, 61 insertions(+), 34 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 5a237c0f7..883026ec9 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -4800,6 +4800,7 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec | [logRetentionDays](#speckuberneteslogretentiondays) | 
`integer` | Optional | | [logsTypes](#speckuberneteslogstypes) | `array` | Optional | | [nodeAllowedSshPublicKey](#speckubernetesnodeallowedsshpublickey) | `object` | Required | +| [nodePoolGlobalAmiType](#speckubernetesnodepoolglobalamitype) | `string` | Optional | | [nodePools](#speckubernetesnodepools) | `array` | Required | | [nodePoolsLaunchKind](#speckubernetesnodepoolslaunchkind) | `string` | Required | | [serviceIpV4Cidr](#speckubernetesserviceipv4cidr) | `string` | Optional | @@ -4984,6 +4985,21 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user +## .spec.kubernetes.nodePoolGlobalAmiType + +### Description + +Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool. + +### Constraints + +**enum**: the value of this property must be equal to one of the following string values: + +| Value | +|:-------------| +|`"alinux2"` | +|`"alinux2023"`| + ## .spec.kubernetes.nodePools ### Properties @@ -5001,7 +5017,7 @@ This key contains the ssh public key that can connect to the nodes via SSH using | [subnetIds](#speckubernetesnodepoolssubnetids) | `array` | Optional | | [tags](#speckubernetesnodepoolstags) | `object` | Optional | | [taints](#speckubernetesnodepoolstaints) | `array` | Optional | -| [type](#speckubernetesnodepoolstype) | `string` | Optional | +| [type](#speckubernetesnodepoolstype) | `string` | Required | ## .spec.kubernetes.nodePools.additionalFirewallRules @@ -5252,20 +5268,42 @@ The type of the FW rule can be ingress or egress | Property | Type | Required | |:------------------------------------------|:---------|:---------| -| [id](#speckubernetesnodepoolsamiid) | `string` | Required | -| [owner](#speckubernetesnodepoolsamiowner) | `string` | Required | +| [id](#speckubernetesnodepoolsamiid) | `string` | Optional | +| 
[owner](#speckubernetesnodepoolsamiowner) | `string` | Optional | +| [type](#speckubernetesnodepoolsamitype) | `string` | Optional | + +### Description + +Configuration for customize the Amazon Machine Image (AMI) for the machines of the Node Pool. + +The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields for using a custom AMI (just with `self-managed` node pool type) or by setting the `ami.type` field to one of the official AMIs based on Amazon Linux. ## .spec.kubernetes.nodePools.ami.id ### Description -The AMI ID to use for the nodes +The ID of the AMI to use for the nodes, must be set toghether with the `owner` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`. ## .spec.kubernetes.nodePools.ami.owner ### Description -The owner of the AMI +The owner of the AMI to use for the nodes, must be set toghether with the `id` field. `ami.id` and `ami.owner` can be only set when Node Pool type is `self-managed` and they can't be set at the same time than `ami.type`. + +## .spec.kubernetes.nodePools.ami.type + +### Description + +The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at the same time than `ami.id` and `ami.owner`. + +### Constraints + +**enum**: the value of this property must be equal to one of the following string values: + +| Value | +|:-------------| +|`"alinux2"` | +|`"alinux2023"`| ## .spec.kubernetes.nodePools.attachedTargetGroups @@ -5353,7 +5391,7 @@ Kubernetes labels that will be added to the nodes ### Description -The name of the node pool +The name of the node pool. 
## .spec.kubernetes.nodePools.size @@ -5412,6 +5450,10 @@ AWS tags that will be added to the ASG and EC2 instances ## .spec.kubernetes.nodePools.type +### Description + +The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5437,21 +5479,6 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u |`"launch_templates"` | |`"both"` | -## .spec.kubernetes.nodePoolGlobalAmiType - -### Description - -Global default AMI type used for EKS worker nodes. This will apply to all node pools unless overridden by a specific node pool. - -### Constraints - -**enum**: the value of this property must be equal to one of the following values: - -| Value | -|:---------------| -| `"alinux2"` | -| `"alinux2023"` | - ## .spec.kubernetes.serviceIpV4Cidr ### Description diff --git a/kfd.yaml b/kfd.yaml index ca28f80cb..fb1bb8235 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -8,7 +8,7 @@ modules: aws: v4.3.0 dr: v3.0.0-rc.1 ingress: v3.0.1-rc.2 - logging: v4.0.0-rc.0 + logging: v4.0.0-rc.1 monitoring: v3.3.0-rc.1 opa: v1.13.0 networking: v2.0.0-rc.2 diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 449c29b0b..596d9060d 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -1609,9 +1609,9 @@ type SpecKubernetesNodePool struct { // Kubernetes taints that will be added to the nodes Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - // The type of Node Pool, can be `self-managed` for using a custom AMI or - // `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. 
It - // is reccomended to use `self-managed` with an `ami.type`. + // The type of Node Pool, can be `self-managed` for using customization like + // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from + // Amazon via the `ami.type` field. It is recommended to use `self-managed`. Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1717,12 +1717,12 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct { SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` } -// Configuration for using custom a Amazon Machine Image (AMI) for the machines of +// Configuration for customize the Amazon Machine Image (AMI) for the machines of // the Node Pool. // // The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields -// for using a custom AMI or by setting the `ami.type` field to one of the official -// AMIs based on Amazon Linux. +// for using a custom AMI (just with `self-managed` node pool type) or by setting +// the `ami.type` field to one of the official AMIs based on Amazon Linux. type SpecKubernetesNodePoolAmi struct { // The ID of the AMI to use for the nodes, must be set toghether with the `owner` // field. 
`ami.id` and `ami.owner` can be only set when Node Pool type is diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 4ae91eee9..3db9f6e1d 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -1562,9 +1562,9 @@ type SpecKubernetesNodePool struct { // Kubernetes taints that will be added to the nodes Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - // The type of Node Pool, can be `self-managed` for using a custom AMI or - // `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It - // is reccomended to use `self-managed` with an `ami.type`. + // The type of Node Pool, can be `self-managed` for using customization like + // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from + // Amazon via the `ami.type` field. It is recommended to use `self-managed`. Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1670,12 +1670,12 @@ type SpecKubernetesNodePoolAdditionalFirewallRules struct { SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` } -// Configuration for using custom a Amazon Machine Image (AMI) for the machines of +// Configuration for customize the Amazon Machine Image (AMI) for the machines of // the Node Pool. // // The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields -// for using a custom AMI or by setting the `ami.type` field to one of the official -// AMIs based on Amazon Linux. +// for using a custom AMI (just with `self-managed` node pool type) or by setting +// the `ami.type` field to one of the official AMIs based on Amazon Linux. 
type SpecKubernetesNodePoolAmi struct { // The ID of the AMI to use for the nodes, must be set toghether with the `owner` // field. `ami.id` and `ami.owner` can be only set when Node Pool type is From 7077f20e9ffecbaad588a83a746fc7de8ee29bce Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 16:20:13 +0100 Subject: [PATCH 114/160] feat: bump ingress to v3 final and monitoring rc --- kfd.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index fb1bb8235..64dc65e64 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -7,9 +7,9 @@ modules: auth: v0.4.0-rc.0 aws: v4.3.0 dr: v3.0.0-rc.1 - ingress: v3.0.1-rc.2 + ingress: v3.0.1 logging: v4.0.0-rc.1 - monitoring: v3.3.0-rc.1 + monitoring: v3.3.0-rc.2 opa: v1.13.0 networking: v2.0.0-rc.2 tracing: v1.1.0 From 18752805c0961cd50b2ed93377adb4056762ed7f Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 17:00:11 +0100 Subject: [PATCH 115/160] chore: generate go-models and docs --- pkg/apis/onpremises/v1alpha2/public/schema.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 1f77e2d7e..88946d9ca 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -63,6 +63,10 @@ type SpecDistribution struct { // Common configuration for all the distribution modules. type SpecDistributionCommon struct { + // EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided + // for core modules. + NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` + // The node selector to use to place the pods for all the KFD modules. Follows // Kubernetes selector format. 
Example: `node.kubernetes.io/role: infra` NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` @@ -86,9 +90,6 @@ type SpecDistributionCommon struct { // value: infra // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` - - // NetworkPoliciesEnabled corresponds to the JSON schema field "networkPoliciesEnabled". - NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` } type SpecDistributionCommonProvider struct { From 1bc104f3c68040f790f965ccad6a7cf3d92993bc Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 11 Nov 2024 14:57:11 +0100 Subject: [PATCH 116/160] feat(docs/schema): improve eks, kfdisitribution schema docs Port the improvements done for on-prem schema in #268 to the EKS and KFDDistribution schemas and banners used to render the markdown. Some minor improvements to On-premises too. 
fixes https://github.com/sighupio/product-management/issues/543 https://github.com/sighupio/furyctl/issues/287 --- banners/ekscluster.md | 8 +- banners/kfddistribution.md | 8 +- banners/onpremises.md | 8 +- defaults/ekscluster-kfd-v1alpha2.yaml | 2 +- docs/schemas/ekscluster-kfd-v1alpha2.md | 786 ++++++--- docs/schemas/kfddistribution-kfd-v1alpha2.md | 529 ++++-- docs/schemas/onpremises-kfd-v1alpha2.md | 78 +- .../ekscluster/v1alpha2/private/schema.go | 1571 ++++++++++++----- pkg/apis/ekscluster/v1alpha2/public/schema.go | 947 +++++----- .../kfddistribution/v1alpha2/public/schema.go | 414 ++++- pkg/apis/onpremises/v1alpha2/public/schema.go | 32 +- schemas/private/ekscluster-kfd-v1alpha2.json | 142 +- schemas/public/ekscluster-kfd-v1alpha2.json | 420 +++-- .../public/kfddistribution-kfd-v1alpha2.json | 267 +-- schemas/public/onpremises-kfd-v1alpha2.json | 42 +- .../config/ekscluster-kfd-v1alpha2.yaml.tpl | 4 +- 16 files changed, 3595 insertions(+), 1663 deletions(-) diff --git a/banners/ekscluster.md b/banners/ekscluster.md index a66d70188..873a47e0c 100644 --- a/banners/ekscluster.md +++ b/banners/ekscluster.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. 
diff --git a/banners/kfddistribution.md b/banners/kfddistribution.md index a44f13847..797d2678f 100644 --- a/banners/kfddistribution.md +++ b/banners/kfddistribution.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/onpremises.md b/banners/onpremises.md index a8d8983dd..7f05c77c8 100644 --- a/banners/onpremises.md +++ b/banners/onpremises.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. 
diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 41e37df57..5f9b60864 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -46,7 +46,7 @@ data: name: "" create: true # internal field, should be either the VPC ID taken from the kubernetes - # phase or the ID of the created VPC in the Ifra phase + # phase or the ID of the created VPC in the Infra phase vpcId: "" # common configuration for nginx ingress controller nginx: diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 883026ec9..e028f2a70 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -15,7 +21,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Description -A Fury Cluster deployed through AWS's Elastic Kubernetes Service +A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). ## .apiVersion @@ -49,6 +55,10 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. 
+ ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -92,11 +102,15 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. + ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -110,21 +124,19 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider, must be EKS if specified +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). - -NOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too. +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). ## .spec.distribution.common.relativeVendorPath ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -139,7 +151,13 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. 
Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect @@ -525,11 +543,15 @@ The type of the secret | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -542,17 +564,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -588,7 +625,7 @@ Dex signing key expiration time duration (default 6h). 
### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -603,7 +640,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect @@ -650,13 +687,21 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -671,7 +716,7 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations.effect @@ -895,23 +940,32 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. 
+ ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints @@ -969,7 +1023,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations @@ -984,7 +1038,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations.effect @@ -1055,7 +1109,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. 
## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations @@ -1070,7 +1124,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations.effect @@ -1128,7 +1182,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations @@ -1143,7 +1197,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations.effect @@ -1214,7 +1268,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations @@ -1229,7 +1283,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. 
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations.effect @@ -1276,13 +1330,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesawsoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesawsoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.aws.overrides.ingresses ## .spec.distribution.modules.aws.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations @@ -1297,7 +1355,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations.effect @@ -1344,6 +1402,10 @@ The value of the toleration | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -1354,13 +1416,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.dr.overrides.tolerations @@ -1375,7 +1441,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect @@ -1416,7 +1482,9 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***eks*** +The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups. + +Default is `none`. ### Constraints @@ -1450,13 +1518,13 @@ The type of the DR, must be ***none*** or ***eks*** ### Description -The name of the velero bucket +The name of the bucket for Velero. ## .spec.distribution.modules.dr.velero.eks.region ### Description -The region where the velero bucket is located +The region where the bucket for Velero will be located. ### Constraints @@ -1507,7 +1575,7 @@ The region where the velero bucket is located ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1522,7 +1590,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect @@ -1665,7 +1733,7 @@ Whether to install or not the default `manifests` and `full` backups schedules. ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD ingresses. 
If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. ## .spec.distribution.modules.ingress.certManager @@ -1676,6 +1744,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1687,29 +1759,33 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +List of challenge solvers to use instead of the default one for the `http01` challenge. 
## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***dns01*** or ***http01*** +The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge. ### Constraints @@ -1733,7 +1809,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1748,7 +1824,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect @@ -1795,6 +1871,10 @@ The value of the toleration | [private](#specdistributionmodulesingressdnsprivate) | `object` | Optional | | [public](#specdistributionmodulesingressdnspublic) | `object` | Optional | +### Description + +DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission. + ## .spec.distribution.modules.ingress.dns.overrides ### Properties @@ -1808,7 +1888,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations @@ -1823,7 +1903,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.ingress.dns.overrides.tolerations.effect @@ -1869,17 +1949,21 @@ The value of the toleration | [create](#specdistributionmodulesingressdnsprivatecreate) | `boolean` | Required | | [name](#specdistributionmodulesingressdnsprivatename) | `string` | Required | +### Description + +The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone. + ## .spec.distribution.modules.ingress.dns.private.create ### Description -If true, the private hosted zone will be created +By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead. ## .spec.distribution.modules.ingress.dns.private.name ### Description -The name of the private hosted zone +The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. ## .spec.distribution.modules.ingress.dns.public @@ -1894,13 +1978,13 @@ The name of the private hosted zone ### Description -If true, the public hosted zone will be created +By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead. ## .spec.distribution.modules.ingress.dns.public.name ### Description -The name of the public hosted zone +The name of the public hosted zone. ## .spec.distribution.modules.ingress.forecastle @@ -1923,7 +2007,7 @@ The name of the public hosted zone ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1938,7 +2022,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect @@ -1987,7 +2071,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -2002,7 +2086,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -2017,7 +2101,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect @@ -2067,7 +2151,7 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. ### Constraints @@ -2089,21 +2173,38 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. 
You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type. + +Default is `single`. ### Constraints @@ -2125,6 +2226,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties | Property | Type | Required | |:-------------------------------------------------------------------------------------|:---------|:---------| | [forecastle](#specdistributionmodulesingressoverridesingressesforecastle) | `object` | Optional | ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle ### Properties | Property | Type | Required | |:----------------------------------------------------------------------------------------------|:----------|:---------| | [disableAuth](#specdistributionmodulesingressoverridesingressesforecastledisableauth) | `boolean` | Optional | | [host](#specdistributionmodulesingressoverridesingressesforecastlehost) | `string` | Optional | | [ingressClass](#specdistributionmodulesingressoverridesingressesforecastleingressclass) | `string` | Optional | ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.disableAuth ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth.
## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -2180,7 +2285,7 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -2232,6 +2337,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -2240,6 +2349,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -2253,7 +2366,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -2268,7 +2381,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect @@ -2322,55 +2435,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -2383,8 +2496,16 @@ This value defines where the output from Flow will be sent. 
Will be the `spec` s | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | | [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -2406,35 +2527,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. + ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. 
## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -2458,13 +2583,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2479,13 +2604,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -2507,6 +2632,10 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2520,7 +2649,7 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2535,7 +2664,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect @@ -2585,19 +2714,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2623,7 +2752,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2638,7 +2767,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect @@ -2697,13 +2826,13 @@ The value of the toleration ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2718,25 +2847,25 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints @@ -2755,6 +2884,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2768,7 +2901,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2783,7 +2916,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.logging.operator.overrides.tolerations.effect @@ -2830,13 +2963,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2851,7 +2988,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations.effect @@ -2892,7 +3029,13 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Selects the logging stack. Options are: +- `none`: will disable the centralized logging. +- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints @@ -2925,7 +3068,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager ### Properties | Property | Type | Required | |:------------------------------------------------------------------------------------------------|:----------|:---------| | [deadManSwitchWebhookUrl](#specdistributionmodulesmonitoringalertmanagerdeadmanswitchwebhookurl) | `string` | Optional | | [installDefaultRules](#specdistributionmodulesmonitoringalertmanagerinstalldefaultrules) | `boolean` | Optional | | [slackWebhookUrl](#specdistributionmodulesmonitoringalertmanagerslackwebhookurl) | `string` | Optional | ## .spec.distribution.modules.monitoring.alertmanager.deadManSwitchWebhookUrl ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter ### Properties | Property | Type | Required | |:---------------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesmonitoringblackboxexporteroverrides) | `object` | Optional | ## .spec.distribution.modules.monitoring.blackboxExporter.overrides ### Properties | Property | Type | Required | |:------------------------------------------------------------------------------------|:---------|:---------| | [nodeSelector](#specdistributionmodulesmonitoringblackboxexporteroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringblackboxexporteroverridestolerations) | `array` | Optional | ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.nodeSelector ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2991,7 +3134,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect @@ -3059,7 +3202,7 @@ Notice that by default anonymous access is enabled.
### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -3074,7 +3217,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect @@ -3144,7 +3287,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -3159,7 +3302,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect @@ -3207,11 +3350,15 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints @@ -3234,35 +3381,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +External S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -3277,7 +3428,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations

@@ -3292,7 +3443,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect

@@ -3333,7 +3484,7 @@ The value of the toleration

### Description

-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.

## .spec.distribution.modules.monitoring.minio

@@ -3345,6 +3496,10 @@ The retention time for the mimir pods

| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |

+### Description
+
+Configuration for Monitoring's MinIO deployment.
+
## .spec.distribution.modules.monitoring.minio.overrides

### Properties

@@ -3358,7 +3513,7 @@ The retention time for the mimir pods

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations

@@ -3373,7 +3528,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect

@@ -3423,19 +3578,19 @@ The value of the toleration

### Description

-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -3447,13 +3602,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -3468,7 +3627,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect @@ -3547,13 +3706,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3568,31 +3727,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the k8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3633,13 +3792,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3654,24 +3813,26 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. 
## .spec.distribution.modules.monitoring.type

### Description

-The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.
+The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.

- `none`: will disable the whole monitoring stack.
-- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
+- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
-- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+
+Default is `prometheus`.

### Constraints

@@ -3705,7 +3866,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3720,7 +3881,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect @@ -3766,20 +3927,31 @@ The value of the toleration | [overrides](#specdistributionmodulesnetworkingoverrides) | `object` | Optional | | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.overrides ### Properties | Property | Type | Required | |:------------------------------------------------------------------------|:---------|:---------| +| [ingresses](#specdistributionmodulesnetworkingoverridesingresses) | `object` | Optional | | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + +## .spec.distribution.modules.networking.overrides.ingresses + ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3794,7 +3966,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.networking.overrides.tolerations.effect @@ -3852,7 +4024,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3867,7 +4039,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect @@ -3915,6 +4087,10 @@ The value of the toleration | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3926,6 +4102,10 @@ The value of the toleration | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3936,7 +4116,7 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. 
### Constraints @@ -3952,7 +4132,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3967,7 +4147,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3982,7 +4162,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect @@ -4030,17 +4210,21 @@ The value of the toleration | [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional | | [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required | +### Description + +Configuration for the Kyverno package. + ## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces ### Description -This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them. +This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them. ## .spec.distribution.modules.policy.kyverno.installDefaultPolicies ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Kyverno policies included with distribution. 
## .spec.distribution.modules.policy.kyverno.overrides @@ -4055,7 +4239,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -4070,7 +4254,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect @@ -4111,7 +4295,7 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints @@ -4132,13 +4316,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -4153,7 +4341,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.policy.overrides.tolerations.effect @@ -4194,7 +4382,9 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints @@ -4217,6 +4407,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -4227,6 +4421,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -4240,7 +4438,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -4255,7 +4453,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect @@ -4305,19 +4503,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. 
## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.tracing.overrides @@ -4329,13 +4527,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -4350,7 +4552,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect @@ -4398,11 +4600,15 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -4425,35 +4631,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +External S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4468,7 +4678,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4483,7 +4693,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect @@ -4524,13 +4734,15 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints @@ -4543,6 +4755,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` @@ -4566,7 +4782,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Description -This key defines the VPC that will be created in AWS +Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead. 
## .spec.infrastructure.vpc.network

@@ -4581,7 +4797,7 @@ This key defines the VPC that will be created

### Description

-This is the CIDR of the VPC that will be created
+The network CIDR for the VPC that will be created

### Constraints

@@ -4602,11 +4818,15 @@ This is the CIDR of the VPC that will be created

| [private](#specinfrastructurevpcnetworksubnetscidrsprivate) | `array` | Required |
| [public](#specinfrastructurevpcnetworksubnetscidrspublic) | `array` | Required |

+### Description
+
+Network CIDRs configuration for private and public subnets.
+
## .spec.infrastructure.vpc.network.subnetsCidrs.private

### Description

-These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created
+Network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created

### Constraints

@@ -4622,7 +4842,7 @@ These are the CIRDs for the private subnets, where the nodes, the pods, and the

### Description

-These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created
+Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created

### Constraints

@@ -4654,31 +4874,31 @@ These are the CIDRs for the public subnets, where the public load balancers and

### Description

-This section defines the creation of VPN bastions
+Configuration for the VPN server instances.

## .spec.infrastructure.vpn.bucketNamePrefix

### Description

-This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states
+This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users).
## .spec.infrastructure.vpn.dhParamsBits ### Description -The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file +The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file. ## .spec.infrastructure.vpn.diskSize ### Description -The size of the disk in GB +The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB. ## .spec.infrastructure.vpn.iamUserNameOverride ### Description -Overrides the default IAM user name for the VPN +Overrides IAM user name for the VPN. Default is to use the cluster name. ### Constraints @@ -4694,25 +4914,25 @@ Overrides the default IAM user name for the VPN ### Description -The size of the AWS EC2 instance +The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`. ## .spec.infrastructure.vpn.instances ### Description -The number of instances to create, 0 to skip the creation +The number of VPN server instances to create, `0` to skip the creation. ## .spec.infrastructure.vpn.operatorName ### Description -The username of the account to create in the bastion's operating system +The username of the account to create in the bastion's operating system. ## .spec.infrastructure.vpn.port ### Description -The port used by the OpenVPN server +The port where each OpenVPN server will listen for connections. ## .spec.infrastructure.vpn.ssh @@ -4728,7 +4948,7 @@ The port used by the OpenVPN server ### Description -The CIDR enabled in the security group that can access the bastions in SSH +The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. 
### Constraints

@@ -4744,7 +4964,7 @@ The CIDR enabled in the security group that can access the bastions in SSH

### Description

-The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user
+List of GitHub usernames whose SSH public keys will be fetched and added as authorized keys of the `operatorName` user.

### Constraints

@@ -4754,13 +4974,13 @@ The github user name list that will be used to get the ssh public key that will

### Description

-This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented
+**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system.

## .spec.infrastructure.vpn.vpcId

### Description

-The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted
+The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted.

### Constraints

@@ -4776,7 +4996,7 @@ The VPC ID where the VPN servers will be created, required only if .spec.infrast

### Description

-The CIDR that will be used to assign IP addresses to the VPN clients when connected
+The network CIDR that will be used to assign IP addresses to the VPN clients when connected.

### Constraints

@@ -4808,6 +5028,10 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

| [vpcId](#speckubernetesvpcid) | `string` | Optional |
| [workersIAMRoleNamePrefixOverride](#speckubernetesworkersiamrolenameprefixoverride) | `string` | Optional |

+### Description
+
+Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.
+
## .spec.kubernetes.apiServer

### Properties

@@ -4823,13 +5047,13 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

### Description

-This value defines if the API server will be accessible only from the private subnets
+This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`.

## .spec.kubernetes.apiServer.privateAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the private subnets
+The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4845,13 +5069,13 @@ This value defines the CIDRs that will be allowed to access the API server from

### Description

-This value defines if the API server will be accessible from the public subnets
+This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`.

## .spec.kubernetes.apiServer.publicAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the public subnets
+The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4873,11 +5097,17 @@ This value defines the CIDRs that will be allowed to access the API server from

| [roles](#speckubernetesawsauthroles) | `array` | Optional |
| [users](#speckubernetesawsauthusers) | `array` | Optional |

+### Description
+
+Optional additional security configuration for EKS IAM via the `aws-auth` configmap.
+
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
+
## .spec.kubernetes.awsAuth.additionalAccounts

### Description

-This optional array defines additional AWS accounts that will be added to the aws-auth configmap
+This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap.
## .spec.kubernetes.awsAuth.roles @@ -4891,7 +5121,7 @@ This optional array defines additional AWS accounts that will be added to the aw ### Description -This optional array defines additional IAM roles that will be added to the aws-auth configmap +This optional array defines additional IAM roles that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.roles.groups @@ -4921,7 +5151,7 @@ This optional array defines additional IAM roles that will be added to the aws-a ### Description -This optional array defines additional IAM users that will be added to the aws-auth configmap +This optional array defines additional IAM users that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.users.groups @@ -4943,7 +5173,7 @@ This optional array defines additional IAM users that will be added to the aws-a ### Description -Overrides the default IAM role name prefix for the EKS cluster +Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name. ### Constraints @@ -4959,7 +5189,37 @@ Overrides the default IAM role name prefix for the EKS cluster ### Description -Optional Kubernetes Cluster log retention in days. Defaults to 90 days. +Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days. + +### Constraints + +**enum**: the value of this property must be equal to one of the following integer values: + +| Value | +|:----| +|0 | +|1 | +|3 | +|5 | +|7 | +|14 | +|30 | +|60 | +|90 | +|120 | +|150 | +|180 | +|365 | +|400 | +|545 | +|731 | +|1096| +|1827| +|2192| +|2557| +|2922| +|3288| +|3653| ## .spec.kubernetes.logsTypes @@ -4983,7 +5243,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. 
### Description -This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user +The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file. ## .spec.kubernetes.nodePoolGlobalAmiType @@ -5019,6 +5279,10 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p | [taints](#speckubernetesnodepoolstaints) | `array` | Optional | | [type](#speckubernetesnodepoolstype) | `string` | Required | +### Description + +Array with all the node pool definitions that will join the cluster. Each item is an object. + ## .spec.kubernetes.nodePools.additionalFirewallRules ### Properties @@ -5029,6 +5293,10 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p | [self](#speckubernetesnodepoolsadditionalfirewallrulesself) | `array` | Optional | | [sourceSecurityGroupId](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupid) | `array` | Optional | +### Description + +Optional additional firewall rules that will be attached to the nodes. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks ### Properties @@ -5044,10 +5312,12 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p ### Description -The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored. +The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details. ### Constraints +**maximum number of items**: the maximum number of items for this array is: `1` + **minimum number of items**: the minimum number of items for this array is: `1` ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.cidrBlocks @@ -5075,6 +5345,10 @@ The CIDR blocks for the FW rule. 
At the moment the first item of the list will b | [from](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.to @@ -5093,8 +5367,16 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.tags +### Description + +Additional AWS tags for the Firewall rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.type +### Description + +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5125,7 +5407,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Description -The name of the FW rule +The name of the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports @@ -5136,6 +5418,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulesselfportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulesselfportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.to @@ -5144,7 +5430,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5160,19 +5446,19 @@ The protocol of the FW rule ### Description -If true, the source will be the security group itself +If `true`, the source will be the security group itself. 
## .spec.kubernetes.nodePools.additionalFirewallRules.self.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints @@ -5204,7 +5490,7 @@ The type of the FW rule can be ingress or egress ### Description -The name of the FW rule +The name for the additional Firewall rule Security Group. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports @@ -5215,6 +5501,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.to @@ -5223,7 +5513,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5239,19 +5529,19 @@ The protocol of the FW rule ### Description -The source security group ID +The source security group ID. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. 
### Constraints @@ -5309,7 +5599,7 @@ The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of ### Description -This optional array defines additional target groups to attach to the instances in the node pool +This optional array defines additional target groups to attach to the instances in the node pool. ### Constraints @@ -5325,7 +5615,7 @@ This optional array defines additional target groups to attach to the instances ### Description -The container runtime to use for the nodes +The container runtime to use in the nodes of the node pool. Default is `containerd`. ### Constraints @@ -5348,28 +5638,42 @@ The container runtime to use for the nodes | [volumeSize](#speckubernetesnodepoolsinstancevolumesize) | `integer` | Optional | | [volumeType](#speckubernetesnodepoolsinstancevolumetype) | `string` | Optional | +### Description + +Configuration for the instances that will be used in the node pool. + ## .spec.kubernetes.nodePools.instance.maxPods +### Description + +Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type. + +Ref: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + ## .spec.kubernetes.nodePools.instance.spot ### Description -If true, the nodes will be created as spot instances +If `true`, the nodes will be created as spot instances. Default is `false`. ## .spec.kubernetes.nodePools.instance.type ### Description -The instance type to use for the nodes +The instance type to use for the nodes. ## .spec.kubernetes.nodePools.instance.volumeSize ### Description -The size of the disk in GB +The size of the disk in GB. ## .spec.kubernetes.nodePools.instance.volumeType +### Description + +Volume type for the instance disk. Default is `gp2`. 
+ ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5385,7 +5689,7 @@ The size of the disk in GB ### Description -Kubernetes labels that will be added to the nodes +Kubernetes labels that will be added to the nodes. ## .spec.kubernetes.nodePools.name @@ -5406,19 +5710,19 @@ The name of the node pool. ### Description -The maximum number of nodes in the node pool +The maximum number of nodes in the node pool. ## .spec.kubernetes.nodePools.size.min ### Description -The minimum number of nodes in the node pool +The minimum number of nodes in the node pool. ## .spec.kubernetes.nodePools.subnetIds ### Description -This value defines the subnet IDs where the nodes will be created +Optional list of subnet IDs where to create the nodes. ### Constraints @@ -5434,7 +5738,7 @@ This value defines the subnet IDs where the nodes will be created ### Description -AWS tags that will be added to the ASG and EC2 instances +AWS tags that will be added to the ASG and EC2 instances. ## .spec.kubernetes.nodePools.taints @@ -5467,7 +5771,7 @@ The type of Node Pool, can be `self-managed` for using customization like custom ### Description -Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. +Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. ### Constraints @@ -5483,7 +5787,7 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u ### Description -This value defines the CIDR that will be used to assign IP addresses to the services +This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services. 
### Constraints @@ -5499,7 +5803,7 @@ This value defines the CIDR that will be used to assign IP addresses to the serv ### Description -This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted +Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created. ### Constraints @@ -5515,7 +5819,7 @@ This value defines the subnet IDs where the EKS cluster will be created, require ### Description -This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted +Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created. ### Constraints @@ -5531,7 +5835,7 @@ This value defines the VPC ID where the EKS cluster will be created, required on ### Description -Overrides the default IAM role name prefix for the EKS workers +Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name. ### Constraints @@ -5676,6 +5980,10 @@ The name of the kustomize plugin ## .spec.region +### Description + +Defines in which AWS region the cluster and all the related resources will be created. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5726,6 +6034,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [terraform](#spectoolsconfigurationterraform) | `object` | Required | +### Description + +Configuration for tools used by furyctl, like Terraform. 
+ ## .spec.toolsConfiguration.terraform ### Properties @@ -5742,6 +6054,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [s3](#spectoolsconfigurationterraformstates3) | `object` | Required | +### Description + +Configuration for storing the Terraform state of the cluster. + ## .spec.toolsConfiguration.terraform.state.s3 ### Properties @@ -5753,17 +6069,21 @@ This map defines which will be the common tags that will be added to all the res | [region](#spectoolsconfigurationterraformstates3region) | `string` | Required | | [skipRegionValidation](#spectoolsconfigurationterraformstates3skipregionvalidation) | `boolean` | Optional | +### Description + +Configuration for the S3 bucket used to store the Terraform state. + ## .spec.toolsConfiguration.terraform.state.s3.bucketName ### Description -This value defines which bucket will be used to store all the states +This value defines which bucket will be used to store all the states. ## .spec.toolsConfiguration.terraform.state.s3.keyPrefix ### Description -This value defines which folder will be used to store all the states inside the bucket +This value defines which folder will be used to store all the states inside the bucket. ### Constraints @@ -5781,7 +6101,7 @@ This value defines which folder will be used to store all the states inside the ### Description -This value defines in which region the bucket is located +This value defines in which region the bucket is located. ### Constraints @@ -5823,5 +6143,5 @@ This value defines in which region the bucket is located ### Description -This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region +This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region. 
diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index b663177e0..095a35e79 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +KFD modules deployed on top of an existing Kubernetes cluster. + ## .apiVersion ### Constraints @@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -84,11 +98,15 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. 
+ ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -102,13 +120,13 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. @@ -116,7 +134,7 @@ NOTE: If plugins are pulling from the default registry, the registry will be rep ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -131,7 +149,13 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect @@ -493,7 +517,7 @@ The type of the secret ### Description -The kubeconfig file path +The path to the kubeconfig file. 
## .spec.distribution.modules @@ -522,11 +546,15 @@ The kubeconfig file path | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -539,17 +567,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -585,7 +628,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -600,7 +643,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect @@ -647,13 +690,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -668,7 +715,7 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations.effect @@ -892,23 +939,32 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. 
## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints @@ -930,6 +986,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -940,13 +1000,17 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.dr.overrides.tolerations @@ -961,7 +1025,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect @@ -1002,7 +1066,9 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***on-premises*** +The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. + +Default is `none`. ### Constraints @@ -1025,6 +1091,10 @@ The type of the DR, must be ***none*** or ***on-premises*** | [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | | [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional | +### Description + +Configuration for the Velero package. + ## .spec.distribution.modules.dr.velero.backend ### Description @@ -1099,7 +1169,7 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1114,7 +1184,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect @@ -1274,7 +1344,7 @@ Whether to install or not the snapshotController component in the cluster. 
Befor ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class. ## .spec.distribution.modules.ingress.certManager @@ -1285,6 +1355,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1296,29 +1370,33 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +Name of the clusterIssuer. 
## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +List of challenge solvers to use instead of the default one for the `http01` challenge. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***http01*** +The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations. ### Constraints @@ -1341,7 +1419,7 @@ The type of the cluster issuer, must be ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1356,7 +1434,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect @@ -1414,7 +1492,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1429,7 +1507,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect @@ -1478,7 +1556,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. 
## .spec.distribution.modules.ingress.nginx.overrides @@ -1493,7 +1571,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1508,7 +1586,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect @@ -1558,7 +1636,7 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. ### Constraints @@ -1580,21 +1658,38 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. 
+ ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type. + +Default is `single`. ### Constraints @@ -1616,6 +1711,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -1638,25 +1737,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. 
## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -1671,7 +1770,7 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -1723,6 +1822,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -1731,6 +1834,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -1744,7 +1851,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -1759,7 +1866,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect @@ -1813,55 +1920,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. 
This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -1874,8 +1981,16 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | | [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1897,35 +2012,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. + ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -1949,13 +2068,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. 
## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -1970,13 +2089,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -1998,6 +2117,10 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2011,7 +2134,7 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2026,7 +2149,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect @@ -2076,19 +2199,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. 
## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2114,7 +2237,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2129,7 +2252,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect @@ -2188,13 +2311,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2209,25 +2332,25 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. 
## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints @@ -2246,6 +2369,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2259,7 +2386,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2274,7 +2401,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect @@ -2321,13 +2448,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.logging.overrides.tolerations

@@ -2342,7 +2473,7 @@ The node selector to use to place the pods for the security module

### Description

-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.logging.overrides.tolerations.effect

@@ -2383,7 +2514,13 @@ The value of the toleration

### Description

-selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.

### Constraints

@@ -2416,7 +2553,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C

### Description

-configuration for the Monitoring module components
+Configuration for the Monitoring module.
## .spec.distribution.modules.monitoring.alertmanager @@ -2432,19 +2569,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter @@ -2467,7 +2604,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2482,7 +2619,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect @@ -2550,7 +2687,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -2565,7 +2702,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect @@ -2635,7 +2772,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -2650,7 +2787,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect @@ -2698,11 +2835,15 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints @@ -2725,35 +2866,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +External S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -2768,7 +2913,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations

@@ -2783,7 +2928,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect

@@ -2824,7 +2969,7 @@ The value of the toleration

### Description

-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.

## .spec.distribution.modules.monitoring.minio

### Properties

@@ -2836,6 +2981,10 @@ The retention time for the mimir pods
| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |

+### Description
+
+Configuration for Monitoring's MinIO deployment.
+
## .spec.distribution.modules.monitoring.minio.overrides

### Properties

@@ -2849,7 +2998,7 @@ The retention time for the mimir pods

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations

@@ -2864,7 +3013,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect

@@ -2914,19 +3063,19 @@ The value of the toleration

### Description

-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -2938,13 +3087,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -2959,7 +3112,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect @@ -3038,13 +3191,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3059,31 +3212,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the K8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3124,13 +3277,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3145,24 +3298,26 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. 
## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints @@ -3196,7 +3351,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3211,7 +3366,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect

@@ -3259,6 +3414,10 @@ The value of the toleration
| [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional |
| [type](#specdistributionmodulesnetworkingtype) | `string` | Required |

+### Description
+
+Configuration for the Networking module.
+
## .spec.distribution.modules.networking.cilium

### Properties

@@ -3271,6 +3430,10 @@ The value of the toleration

## .spec.distribution.modules.networking.cilium.maskSize

+### Description
+
+The mask size to use for the Pods network on each node.
+
## .spec.distribution.modules.networking.cilium.overrides

### Properties

@@ -3284,7 +3447,7 @@ The value of the toleration

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.networking.cilium.overrides.tolerations

@@ -3299,7 +3462,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect

@@ -3338,6 +3501,10 @@ The value of the toleration

## .spec.distribution.modules.networking.cilium.podCidr

+### Description
+
+Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`.
+ ### Constraints **pattern**: the string must match the following regular expression: @@ -3358,13 +3525,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.networking.overrides.ingresses ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3379,7 +3550,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations.effect @@ -3437,7 +3608,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3452,7 +3623,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect @@ -3493,7 +3664,7 @@ The value of the toleration ### Description -The type of networking to use, either ***none***, ***calico*** or ***cilium*** +The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`. 
### Constraints @@ -3516,6 +3687,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3527,6 +3702,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3537,7 +3716,7 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints @@ -3553,7 +3732,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3568,7 +3747,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations @@ -3583,7 +3762,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect @@ -3631,17 +3810,21 @@ The value of the toleration | [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional | | [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required | +### Description + +Configuration for the Kyverno package. + ## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces ### Description -This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them. +This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them. ## .spec.distribution.modules.policy.kyverno.installDefaultPolicies ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Kyverno policies included with distribution. ## .spec.distribution.modules.policy.kyverno.overrides @@ -3656,7 +3839,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -3671,7 +3854,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect @@ -3712,7 +3895,7 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints @@ -3733,13 +3916,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -3754,7 +3941,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect @@ -3795,7 +3982,9 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. 
### Constraints @@ -3818,6 +4007,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -3828,6 +4021,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -3841,7 +4038,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -3856,7 +4053,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect @@ -3906,19 +4103,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. 
## .spec.distribution.modules.tracing.overrides @@ -3930,13 +4127,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -3951,7 +4152,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect @@ -3999,11 +4200,15 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints @@ -4026,35 +4231,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +External S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4069,7 +4278,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4084,7 +4293,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect @@ -4125,13 +4334,15 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints @@ -4144,6 +4355,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index 67cfd9844..f3b0f827a 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). 
+An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +A KFD Cluster deployed on top of a set of existing VMs. + ## .apiVersion ### Constraints @@ -103,7 +113,7 @@ EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided f ### Description -The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra` +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -125,6 +135,8 @@ The provider type. Don't set. FOR INTERNAL USE ONLY. URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. + ## .spec.distribution.common.relativeVendorPath ### Description @@ -1077,6 +1089,8 @@ The type of the Auth provider, options are: - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1179,6 +1193,8 @@ The value of the toleration The type of the Disaster Recovery, must be `none` or `on-premises`. 
`none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1494,7 +1510,7 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer +Name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers @@ -1666,7 +1682,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller package. +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1794,11 +1810,13 @@ The signing key file's content. You can use the `"{file://}"` notation to ### Description -The type of the nginx ingress controller, options are: +The type of the Ingress nginx controller, options are: - `none`: no ingress controller will be installed and no infrastructural ingresses will be created. - `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. - `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type. +Default is `single`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1863,7 +1881,7 @@ Use this ingress class for the ingress instead of the default one. ### Description -Set to override the node selector used to place the pods of the Ingress module +Set to override the node selector used to place the pods of the Ingress module. 
## .spec.distribution.modules.ingress.overrides.tolerations @@ -1878,7 +1896,7 @@ Set to override the node selector used to place the pods of the Ingress module ### Description -Set to override the tolerations that will be added to the pods of the Ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -1946,7 +1964,7 @@ Configuration for the Logging module. ### Description -DEPRECATED in latest versions of KFD. +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. ## .spec.distribution.modules.logging.cerebro.overrides @@ -2178,13 +2196,13 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2199,13 +2217,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -2421,13 +2439,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2442,13 +2460,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize @@ -2630,6 +2648,8 @@ Selects the logging stack. Options are: - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. - `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Default is `opensearch`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -2677,7 +2697,7 @@ Configuration for the Monitoring module. ### Description -The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules @@ -3299,13 +3319,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3320,13 +3340,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. 
## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize @@ -3385,13 +3405,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3406,13 +3426,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type @@ -3425,6 +3445,8 @@ The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or ` - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +Default is `prometheus`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4089,6 +4111,8 @@ The value of the toleration The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. 
+Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4445,6 +4469,8 @@ The retention time for the traces stored in Tempo. The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. +Default is `tempo`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4458,7 +4484,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr ### Description -Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1. +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. ### Constraints diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 596d9060d..b4117edb8 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -73,23 +73,28 @@ type SpecDistribution struct { } type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). 
- // - // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // (Default is `registry.sighup.io/fury`). Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -123,45 +128,29 @@ type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` } -type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string - -const ( - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { - // The annotations of the configmap - Annotations TypesKubeLabels `json:"annotations,omitempty" 
yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the configmap will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the configmap - Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource + // Each entry should follow the format of Kustomize's images patch type SpecDistributionCustomPatchesImages []map[string]interface{} -type SpecDistributionCustomPatchesPatch struct { - // Options corresponds to the JSON schema field "options". 
- Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` - - // The patch content - Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` - - // The path of the patch - Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` - - // Target corresponds to the JSON schema field "target". - Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` -} - type SpecDistributionCustomPatchesPatchOptions struct { // If true, the kind change will be allowed AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` @@ -193,13 +182,73 @@ type SpecDistributionCustomPatchesPatchTarget struct { Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". + Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". 
+ Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch // Each entry should be either a relative file path or an inline content resolving // to a partial or complete resource definition type SpecDistributionCustomPatchesPatchesStrategicMerge []string -type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" 
yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesSecretGeneratorResource struct { // The behavior of the secret @@ -227,28 +276,26 @@ type SpecDistributionCustomPatchesSecretGeneratorResource struct { Type *string `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string - -const ( - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { - // The annotations of the secret - Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the secret will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the secret - 
Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + type SpecDistributionCustompatches struct { // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` @@ -267,12 +314,9 @@ type SpecDistributionCustompatches struct { SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` } -type SpecDistributionModules struct { - // Auth corresponds to the JSON schema field "auth". - Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - - // Aws corresponds to the JSON schema field "aws". - Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). 
+ IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` // Dr corresponds to the JSON schema field "dr". Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` @@ -314,10 +358,23 @@ type SpecDistributionModulesAuth struct { } type SpecDistributionModulesAuthDex struct { - // The additional static clients for dex + // Additional static clients defitions that will be added to the default clients + // included with the distribution in Dex's configuration. Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` - // The connectors for dex + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` // Expiry corresponds to the JSON schema field "expiry". @@ -347,162 +404,296 @@ type SpecDistributionModulesAuthOverrides struct { } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. 
IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress -type SpecDistributionModulesAuthPomerium interface{} - -// override default routes for KFD components -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { - // GatekeeperPolicyManager corresponds to the JSON schema field - // "gatekeeperPolicyManager". - GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` - - // HubbleUi corresponds to the JSON schema field "hubbleUi". - HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` - - // IngressNgnixForecastle corresponds to the JSON schema field - // "ingressNgnixForecastle". - IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` - - // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". - LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` - - // LoggingOpensearchDashboards corresponds to the JSON schema field - // "loggingOpensearchDashboards". 
- LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` - - // MonitoringAlertmanager corresponds to the JSON schema field - // "monitoringAlertmanager". - MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` - - // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". - MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` - - // MonitoringMinioConsole corresponds to the JSON schema field - // "monitoringMinioConsole". - MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` +// Override the common configuration with a particular configuration for the Auth +// module. +type SpecDistributionModulesAuthOverrides struct { + // Override the definition of the Auth module ingresses. + Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // MonitoringPrometheus corresponds to the JSON schema field - // "monitoringPrometheus". - MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + // Set to override the node selector used to place the pods of the Auth module. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". - TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` + // Set to override the tolerations that will be added to the pods of the Auth + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} +type SpecDistributionModulesAuthPomerium interface{} -// Pomerium needs some user-provided secrets to be fully configured. 
These secrets -// should be unique between clusters. -type SpecDistributionModulesAuthPomeriumSecrets struct { - // Cookie Secret is the secret used to encrypt and sign session cookies. - // - // To generate a random key, run the following command: `head -c32 /dev/urandom | - // base64` - COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` - - // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth - // type is SSO, this value will be the secret used to authenticate Pomerium with - // Dex, **use a strong random value**. - IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` - - // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate - // requests between Pomerium services. It's critical that secret keys are random, - // and stored safely. - // - // To generate a key, run the following command: `head -c32 /dev/urandom | base64` - SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` +// Configuration for the HTTP Basic Auth provider. +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for logging in with the HTTP basic authentication. + Password string `json:"password" yaml:"password" mapstructure:"password"` - // Signing Key is the base64 representation of one or more PEM-encoded private - // keys used to sign a user's attestation JWT, which can be consumed by upstream - // applications to pass along identifying user information like username, id, and - // groups. - // - // To generates an P-256 (ES256) signing key: - // - // ```bash - // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem - // # careful! 
this will output your private key in terminal - // cat ec_private.pem | base64 - // ``` - SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` + // The username for logging in with the HTTP basic authentication. + Username string `json:"username" yaml:"username" mapstructure:"username"` } -// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. -type SpecDistributionModulesAuthPomerium_2 struct { - // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". - DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // DEPRECATED: Use defaultRoutesPolicy and/or routes - Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` - - // Additional routes configuration for Pomerium. Follows Pomerium's route format: - // https://www.pomerium.com/docs/reference/routes - Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` - - // Secrets corresponds to the JSON schema field "secrets". - Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil } -type SpecDistributionModulesAuthProvider struct { - // BasicAuth corresponds to the JSON schema field "basicAuth". - BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` +type SpecDistributionModulesAuthProviderType string - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** - Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } -type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth - Password string `json:"password" yaml:"password" mapstructure:"password"` - - // The username for the basic auth - Username string `json:"username" yaml:"username" mapstructure:"username"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil } -type SpecDistributionModulesAuthProviderType string - const ( - SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) -type SpecDistributionModulesAws struct { - // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". - ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"` - - // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". + BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. 
+ // + // Default is `none`. + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} + +// Configuration for the Auth module. +type SpecDistributionModulesAuth struct { + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` + + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` + + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil +} + +type TypesAwsArn string + +type TypesAwsIamRoleName string + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAwsClusterAutoscaler struct { + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + } + type Plain SpecDistributionModulesAwsClusterAutoscaler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + return nil +} + +type SpecDistributionModulesAwsEbsCsiDriver struct { + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + } + type Plain SpecDistributionModulesAwsEbsCsiDriver + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + return nil +} + +type SpecDistributionModulesAwsEbsSnapshotController struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesAwsLoadBalancerController struct { + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". 
+ IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + } + type Plain SpecDistributionModulesAwsLoadBalancerController + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsLoadBalancerController(plain) + return nil +} + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. 
+ DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for the basic auth + Password string `json:"password" yaml:"password" mapstructure:"password"` + + // The username for the basic auth + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + +type SpecDistributionModulesAuthProviderType string + +const ( + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) + +type SpecDistributionModulesAws struct { + // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". + ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"` + + // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"` // EbsSnapshotController corresponds to the JSON schema field @@ -560,29 +751,112 @@ type SpecDistributionModulesDr struct { type SpecDistributionModulesDrType string const ( - SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" + SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" ) -type SpecDistributionModulesDrVelero struct { - // Eks corresponds to the JSON schema field "eks". 
- Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` +type TypesAwsS3BucketName string - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type TypesAwsRegion string - // Configuration for Velero's backup schedules. - Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", } +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) + return nil +} + +const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + +type Metadata struct { + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. 
+ Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +const ( + TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" +) + type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // IamRoleArn corresponds to the JSON schema field "iamRoleArn". 
IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -688,14 +962,47 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { type SpecDistributionModulesIngressCertManagerClusterIssuerType string +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} + const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) -type SpecDistributionModulesIngressClusterIssuerRoute53 struct { - // HostedZoneId corresponds to the JSON schema field "hostedZoneId". - HostedZoneId string `json:"hostedZoneId" yaml:"hostedZoneId" mapstructure:"hostedZoneId"` +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. +type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email address to use during the certificate issuing process. 
+ Email string `json:"email" yaml:"email" mapstructure:"email"` + + // Name of the clusterIssuer. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Route53 corresponds to the JSON schema field "route53". + Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` // IamRoleArn corresponds to the JSON schema field "iamRoleArn". IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` @@ -715,25 +1022,85 @@ type SpecDistributionModulesIngressDNS struct { Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` } -type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the private hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // VpcId corresponds to the JSON schema field "vpcId". - VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` -} - -type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the public hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - +type SpecDistributionModulesIngressDNSPrivate struct { + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // VpcId corresponds to the JSON schema field "vpcId". + VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["vpcId"]; !ok || v == nil { + return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPrivate(plain) + return nil +} + +type SpecDistributionModulesIngressDNSPublic struct { + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the public hosted zone. + Name string `json:"name" yaml:"name" mapstructure:"name"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPublic(plain) + return nil +} + +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. +type SpecDistributionModulesIngressDNS struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Private corresponds to the JSON schema field "private". + Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` + + // Public corresponds to the JSON schema field "public". + Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` +} + type SpecDistributionModulesIngressExternalDNS struct { // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn". PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"` @@ -742,6 +1109,27 @@ type SpecDistributionModulesIngressExternalDNS struct { PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"` } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + type Plain SpecDistributionModulesIngressExternalDNS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressExternalDNS(plain) + return nil +} + type SpecDistributionModulesIngressForecastle struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -772,26 +1160,105 @@ type SpecDistributionModulesIngressNginxTLSProvider string const ( SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" - SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" ) +// Kubernetes TLS secret for the ingresses TLS certificate. type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". + // The Certificate Authority certificate file's content. You can use the + // `"{file://}"` notation to get the content from a file. 
Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - // The certificate file content or you can use the file notation to get the - // content from a file + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - // Key corresponds to the JSON schema field "key". + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. Key string `json:"key" yaml:"key" mapstructure:"key"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} + +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + + // Secret corresponds to the JSON schema field "secret". + Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + } + type Plain SpecDistributionModulesIngressNginxTLS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLS(plain) + return nil +} + type SpecDistributionModulesIngressNginxType string +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + const ( - SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) @@ -816,20 +1283,37 @@ type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` - // CustomOutputs corresponds to the JSON schema field "customOutputs". 
- CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` + // Set to override the node selector used to place the pods of the Ingress module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // Loki corresponds to the JSON schema field "loki". - Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` + // Set to override the tolerations that will be added to the pods of the Ingress + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` +type SpecDistributionModulesIngress struct { + // The base domain used for all the KFD ingresses. If in the nginx `dual` + // configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // Opensearch corresponds to the JSON schema field "opensearch". - Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. + CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` - // Operator corresponds to the JSON schema field "operator". 
- Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + // Dns corresponds to the JSON schema field "dns". + Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` + + // ExternalDns corresponds to the JSON schema field "externalDns". + ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"` + + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // Configurations for the Ingress nginx controller package. + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -844,61 +1328,62 @@ type SpecDistributionModulesLogging struct { Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. 
type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. 
Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. 
It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } @@ -928,24 +1413,25 @@ type SpecDistributionModulesLoggingLoki struct { type SpecDistributionModulesLoggingLokiBackend string const ( - SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. 
AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -956,7 +1442,7 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -975,10 +1461,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. 
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -996,11 +1483,19 @@ type SpecDistributionModulesLoggingOperator struct { type SpecDistributionModulesLoggingType string +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + const ( - SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" - SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" - SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" ) // configuration for the Monitoring module components @@ -1008,20 +1503,20 @@ type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". 
- BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + // CustomOutputs corresponds to the JSON schema field "customOutputs". + CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + // Loki corresponds to the JSON schema field "loki". + Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + // Opensearch corresponds to the JSON schema field "opensearch". + Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + // Operator corresponds to the JSON schema field "operator". 
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1055,14 +1550,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -1118,24 +1614,25 @@ type SpecDistributionModulesMonitoringMimir struct { type SpecDistributionModulesMonitoringMimirBackend string const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" ) +// Configuration for Mimir's external storage backend. 
type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external mimir backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external mimir backend + // External S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external mimir backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external mimir backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1151,13 +1648,27 @@ type SpecDistributionModulesMonitoringMinio struct { } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + type SpecDistributionModulesMonitoringPrometheus struct { // Set this option to ship the collected metrics to a remote Prometheus receiver. // @@ -1171,16 +1682,18 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + type SpecDistributionModulesMonitoringPrometheusAgent struct { // Set this option to ship the collected metrics to a remote Prometheus receiver. 
// @@ -1195,33 +1708,120 @@ type SpecDistributionModulesMonitoringPrometheusAgent struct { Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` } -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} +type SpecDistributionModulesMonitoringType string -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} -type SpecDistributionModulesMonitoringType string +const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" + +// UnmarshalJSON implements json.Unmarshaler. +func (j *Metadata) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") + } + type Plain Metadata + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) + } + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) + } + *j = Metadata(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + } + *j = SpecDistributionModulesNetworkingType(v) + return nil +} const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" ) -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworking struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". 
+ Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". + PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. + // + // - `none`: will disable the whole monitoring stack. + // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus + // in Agent mode (no alerting, no queries, no storage), and all the exporters + // needed to get metrics for the status of the cluster and the workloads. Useful + // when having a centralized (remote) Prometheus where to ship the metrics and not + // storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + + // X509Exporter corresponds to the JSON schema field "x509Exporter". + X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` } -type SpecDistributionModulesNetworking struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // Type corresponds to the JSON schema field "type". - Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + } + type Plain SpecDistributionModulesMonitoring + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesMonitoring(plain) + return nil } type SpecDistributionModulesNetworkingTigeraOperator struct { @@ -1253,49 +1853,143 @@ type SpecDistributionModulesPolicyGatekeeper struct { // enforce the constraints on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. 
EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) + return nil +} + +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil +} const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" ) +// Configuration for the Kyverno package. type SpecDistributionModulesPolicyKyverno struct { // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. + // enforce the policies on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. 
InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The validation failure action to use for the kyverno module + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil +} type SpecDistributionModulesPolicyType string +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + } + *j = SpecDistributionModulesPolicyType(v) + return nil +} + const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" @@ -1308,13 +2002,40 @@ type SpecDistributionModulesTracing struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + } + type Plain SpecDistributionModulesPolicy + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicy(plain) + return nil +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` // The type of tracing to use, either ***none*** or ***tempo*** Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1322,7 +2043,7 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". 
RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1351,24 +2072,25 @@ type SpecDistributionModulesTracingTempo struct { type SpecDistributionModulesTracingTempoBackend string const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // External S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. 
Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1486,8 +2208,8 @@ type SpecKubernetes struct { // pools unless overridden by a specific node pool. NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` - // NodePools corresponds to the JSON schema field "nodePools". - NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` // Either `launch_configurations`, `launch_templates` or `both`. For new clusters // use `launch_templates`, for existing cluster you'll need to migrate from @@ -1615,9 +2337,9 @@ type SpecKubernetesNodePool struct { Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { - // CidrBlocks corresponds to the JSON schema field "cidrBlocks". - CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` +type SpecInfrastructureVpcNetwork struct { + // The network CIDR for the VPC that will be created + Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` // Name corresponds to the JSON schema field "name". Name string `json:"name" yaml:"name" mapstructure:"name"` @@ -1814,13 +2536,11 @@ type SpecPlugins struct { // Helm corresponds to the JSON schema field "helm". 
Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} + // The username of the account to create in the bastion's operating system. + OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". - Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` + // The port where each OpenVPN server will listen for connections. + Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` // Repositories corresponds to the JSON schema field "repositories". Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` @@ -1843,40 +2563,30 @@ type SpecPluginsHelmReleases []struct { // Set corresponds to the JSON schema field "set". 
Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` -} - -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` -} - -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` -} - -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - - // The name of the kustomize plugin - Name string `json:"name" yaml:"name" mapstructure:"name"` + // The network CIDR that will be used to assign IP addresses to the VPN clients + // when connected. + VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` } -type SpecToolsConfiguration struct { - // Terraform corresponds to the JSON schema field "terraform". - Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpn(plain) + return nil } type SpecToolsConfigurationTerraform struct { @@ -2122,6 +2832,20 @@ func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) err return nil } +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + +// Port range for the Firewall Rule. +type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { + // From corresponds to the JSON schema field "from". + From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` + + // To corresponds to the JSON schema field "to". + To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2143,6 +2867,17 @@ func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error return nil } +type TypesAwsIpProtocol string + +type TypesAwsTags map[string]string + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string @@ -2163,6 +2898,32 @@ func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { + // CidrBlocks corresponds to the JSON schema field "cidrBlocks". + CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` + + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 3db9f6e1d..e44fe614d 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -10,7 +10,7 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). type EksclusterKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -30,7 +30,8 @@ type EksclusterKfdV1Alpha2Kind string const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -38,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Infrastructure corresponds to the JSON schema field "infrastructure". @@ -50,14 +53,15 @@ type Spec struct { // Plugins corresponds to the JSON schema field "plugins". 
Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` - // Region corresponds to the JSON schema field "region". + // Defines in which AWS region the cluster and all the related resources will be + // created. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This map defines which will be the common tags that will be added to all the // resources created on AWS. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + // Configuration for tools used by furyctl, like Terraform. ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } @@ -72,29 +76,35 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). - // - // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // (Default is `registry.sighup.io/fury`). 
Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -296,8 +306,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -313,11 +326,25 @@ type SpecDistributionModulesAuth struct { Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } +// Configuration for the Dex package. 
type SpecDistributionModulesAuthDex struct { - // The additional static clients for dex + // Additional static clients defitions that will be added to the default clients + // included with the distribution in Dex's configuration. Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` - // The connectors for dex + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` // Expiry corresponds to the JSON schema field "expiry". @@ -335,25 +362,29 @@ type SpecDistributionModulesAuthDexExpiry struct { SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` } +// Override the common configuration with a particular configuration for the Auth +// module. type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". + // Override the definition of the Auth module ingresses. Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the auth module + // Set to override the node selector used to place the pods of the Auth module. 
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -478,15 +509,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. 
type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -537,11 +576,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***eks*** + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -567,10 +611,10 @@ type SpecDistributionModulesDrVelero struct { } type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. 
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -625,12 +669,15 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD ingresses. If in the nginx `dual` + // configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Dns corresponds to the JSON schema field "dns". @@ -639,13 +686,17 @@ type SpecDistributionModulesIngress struct { // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. 
Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -654,17 +705,21 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. 
Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -675,6 +730,8 @@ const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. type SpecDistributionModulesIngressDNS struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -686,19 +743,23 @@ type SpecDistributionModulesIngressDNS struct { Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the private hosted zone + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. Name string `json:"name" yaml:"name" mapstructure:"name"` } type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the public hosted zone + // The name of the public hosted zone. 
Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -714,14 +775,24 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. + // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. KFD infrastructural + // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` // Secret corresponds to the JSON schema field "secret". @@ -736,15 +807,18 @@ const ( SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" ) +// Kubernetes TLS secret for the ingresses TLS certificate. type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". + // The Certificate Authority certificate file's content. 
You can use the + // `"{file://}"` notation to get the content from a file. Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - // The certificate file content or you can use the file notation to get the - // content from a file + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - // Key corresponds to the JSON schema field "key". + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. Key string `json:"key" yaml:"key" mapstructure:"key"` } @@ -756,14 +830,17 @@ const ( SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) +// Override the common configuration with a particular configuration for the +// Ingress module. type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the ingress module + // Set to override the tolerations that will be added to the pods of the Ingress + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -772,6 +849,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } +// Configuration for the Logging module. 
type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` @@ -794,79 +872,87 @@ type SpecDistributionModulesLogging struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an + // Selects the logging stack. Options are: + // - `none`: will disable the centralized logging. + // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. + // stored. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but with + // no local storage, you will have to create the needed Outputs and ClusterOutputs + // to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. 
It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+ // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -892,23 +978,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. 
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -916,15 +1004,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -935,10 +1023,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. 
One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -949,6 +1038,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -963,7 +1053,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -992,12 +1082,12 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. // // - `none`: will disable the whole monitoring stack. 
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // instace, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus @@ -1005,9 +1095,10 @@ type SpecDistributionModulesMonitoring struct { // needed to get metrics for the status of the cluster and the workloads. Useful // when having a centralized (remote) Prometheus where to ship the metrics and not // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1015,14 +1106,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. 
InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -1061,17 +1153,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Mimir package. type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Mimir's external storage backend. ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the mimir pods + // The retention time for the metrics stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 + // days.
RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1082,23 +1179,25 @@ const ( SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" ) +// Configuration for Mimir's external storage backend. type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external mimir backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external mimir backend + // External S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external mimir backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external mimir backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Monitoring's MinIO deployment. type SpecDistributionModulesMonitoringMinio struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1106,15 +1205,15 @@ type SpecDistributionModulesMonitoringMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -1131,13 +1230,13 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. 
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1173,9 +1272,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Networking module. type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` @@ -1186,6 +1286,7 @@ type SpecDistributionModulesNetworkingTigeraOperator struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Policy module. type SpecDistributionModulesPolicy struct { // Gatekeeper corresponds to the JSON schema field "gatekeeper". Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` @@ -1196,20 +1297,27 @@ type SpecDistributionModulesPolicy struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. 
Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the Gatekeeper package. type SpecDistributionModulesPolicyGatekeeper struct { // This parameter adds namespaces to Gatekeeper's exemption list, so it will not // enforce the constraints on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". @@ -1224,18 +1332,22 @@ const ( SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" ) +// Configuration for the Kyverno package. type SpecDistributionModulesPolicyKyverno struct { // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. + // enforce the policies on them. 
AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Kyverno policies included with + // the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The validation failure action to use for the kyverno module + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } @@ -1254,6 +1366,7 @@ const ( SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" ) +// Configuration for the Tracing module. type SpecDistributionModulesTracing struct { // Minio corresponds to the JSON schema field "minio". Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` @@ -1264,10 +1377,14 @@ type SpecDistributionModulesTracing struct { // Tempo corresponds to the JSON schema field "tempo". Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment.
+ // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1275,29 +1392,32 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for the Tempo package. type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
+ // Configuration for Tempo's external storage backend. ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods + // The retention time for the traces stored in Tempo. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1308,20 +1428,21 @@ const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // External S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. 
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1333,88 +1454,98 @@ const ( ) type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS + // Vpc corresponds to the JSON schema field "vpc". Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` - // This section defines the creation of VPN bastions + // Vpn corresponds to the JSON schema field "vpn". Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` } +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. type SpecInfrastructureVpc struct { // Network corresponds to the JSON schema field "network". Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` } type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created + // The network CIDR for the VPC that will be created Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` } +// Network CIDRS configuration for private and public subnets. 
type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // These are the CIRDs for the private subnets, where the nodes, the pods, and the + // Network CIDRs for the private subnets, where the nodes, the pods, and the // private load balancers will be created Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - // These are the CIDRs for the public subnets, where the public load balancers and - // the VPN servers will be created + // Network CIDRs for the public subnets, where the public load balancers and the + // VPN servers will be created Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } +// Configuration for the VPN server instances. type SpecInfrastructureVpn struct { - // This value defines the prefix that will be used to create the bucket name where - // the VPN servers will store the states + // This value defines the prefix for the bucket name where the VPN servers will + // store their state (VPN certificates, users). BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` - // The dhParamsBits size used for the creation of the .pem file that will be used - // in the dh openvpn server.conf file + // The `dhParamsBits` size used for the creation of the .pem file that will be + // used in the dh openvpn server.conf file. DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` - // The size of the disk in GB + // The size of the disk in GB for each VPN server. Example: entering `50` will + // create disks of 50 GB. DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` - // Overrides the default IAM user name for the VPN + // Overrides IAM user name for the VPN. Default is to use the cluster name.
IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` - // The size of the AWS EC2 instance + // The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 + // nomenclature. Example: `t3.micro`. InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"` - // The number of instances to create, 0 to skip the creation + // The number of VPN server instances to create, `0` to skip the creation. Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` - // The username of the account to create in the bastion's operating system + // The username of the account to create in the bastion's operating system. OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` - // The port used by the OpenVPN server + // The port where each OpenVPN server will listen for connections. Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` // Ssh corresponds to the JSON schema field "ssh". Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - // The VPC ID where the VPN servers will be created, required only if - // .spec.infrastructure.vpc is omitted + // The ID of the VPC where the VPN server instances will be created, required only + // if `.spec.infrastructure.vpc` is omitted. VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - // The CIDR that will be used to assign IP addresses to the VPN clients when - // connected + // The network CIDR that will be used to assign IP addresses to the VPN clients + // when connected.
VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` } type SpecInfrastructureVpnSsh struct { - // The CIDR enabled in the security group that can access the bastions in SSH + // The network CIDR enabled in the security group to access the VPN servers + // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` - // The github user name list that will be used to get the ssh public key that will - // be added as authorized key to the operatorName user + // List of GitHub usernames from whom get their SSH public key and add as + // authorized keys of the `operatorName` user. GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` - // This value defines the public keys that will be added to the bastion's - // operating system NOTES: Not yet implemented + // **NOT IN USE**, use `githubUsersName` instead. This value defines the public + // keys that will be added to the bastion's operating system. PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` } +// Defines the Kubernetes components configuration and the values needed for the +// `kubernetes` phase of furyctl. type SpecKubernetes struct { // ApiServer corresponds to the JSON schema field "apiServer". ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` @@ -1422,17 +1553,20 @@ type SpecKubernetes struct { // AwsAuth corresponds to the JSON schema field "awsAuth". AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` - // Overrides the default IAM role name prefix for the EKS cluster + // Overrides the default prefix for the IAM role name of the EKS cluster. 
If not + // set, a name will be generated from the cluster name. ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` - // Optional Kubernetes Cluster log retention in days. Defaults to 90 days. - LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` + // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. + // Setting the value to zero (`0`) makes retention last forever. Default is `90` + // days. + LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` - // This key contains the ssh public key that can connect to the nodes via SSH - // using the ec2-user user + // The SSH public key that can connect to the nodes via SSH using the `ec2-user` + // user. Example: the contents of your `~/.ssh/id_rsa.pub` file. NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` // Global default AMI type used for EKS worker nodes. This will apply to all node @@ -1442,55 +1576,62 @@ type SpecKubernetes struct { // NodePools corresponds to the JSON schema field "nodePools". NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` - // Either `launch_configurations`, `launch_templates` or `both`. For new clusters - // use `launch_templates`, for existing cluster you'll need to migrate from - // `launch_configurations` to `launch_templates` using `both` as interim.
+ // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`, for adopting existing cluster you'll need + // to migrate from `launch_configurations` to `launch_templates` using `both` as + // interim. NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` - // This value defines the CIDR that will be used to assign IP addresses to the - // services + // This value defines the network CIDR that will be used to assign IP addresses to + // Kubernetes services. ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` - // This value defines the subnet IDs where the EKS cluster will be created, - // required only if .spec.infrastructure.vpc is omitted + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the subnet where the EKS cluster will be created. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // This value defines the VPC ID where the EKS cluster will be created, required - // only if .spec.infrastructure.vpc is omitted + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the VPC where the EKS cluster and its related resources will be created. VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - // Overrides the default IAM role name prefix for the EKS workers + // Overrides the default prefix for the IAM role name of the EKS workers. If not + // set, a name will be generated from the cluster name. 
WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` } type SpecKubernetesAPIServer struct { - // This value defines if the API server will be accessible only from the private - // subnets + // This value defines if the Kubernetes API server will be accessible from the + // private subnets. Default is `true`. PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` - // This value defines the CIDRs that will be allowed to access the API server from - // the private subnets + // The network CIDRs from the private subnets that will be allowed to access the + // Kubernetes API server. PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` - // This value defines if the API server will be accessible from the public subnets + // This value defines if the Kubernetes API server will be accessible from the + // public subnets. Default is `false`. PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` - // This value defines the CIDRs that will be allowed to access the API server from - // the public subnets + // The network CIDRs from the public subnets that will be allowed to access the + // Kubernetes API server. PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` } +// Optional additional security configuration for EKS IAM via the `aws-auth` +// configmap. +// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html type SpecKubernetesAwsAuth struct { // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap + // `aws-auth` configmap.
AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` // This optional array defines additional IAM users that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` } @@ -1516,6 +1657,8 @@ type SpecKubernetesAwsAuthUser struct { Username string `json:"username" yaml:"username" mapstructure:"username"` } +type SpecKubernetesLogRetentionDays int + type SpecKubernetesLogsTypesElem string const ( @@ -1526,6 +1669,8 @@ const ( SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" ) +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. type SpecKubernetesNodePool struct { // AdditionalFirewallRules corresponds to the JSON schema field // "additionalFirewallRules". @@ -1535,16 +1680,17 @@ type SpecKubernetesNodePool struct { Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` // This optional array defines additional target groups to attach to the instances - // in the node pool + // in the node pool. AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - // The container runtime to use for the nodes + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. 
ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` // Instance corresponds to the JSON schema field "instance". Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - // Kubernetes labels that will be added to the nodes + // Kubernetes labels that will be added to the nodes. Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` // The name of the node pool. @@ -1553,13 +1699,13 @@ type SpecKubernetesNodePool struct { // Size corresponds to the JSON schema field "size". Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - // This value defines the subnet IDs where the nodes will be created + // Optional list of subnet IDs where to create the nodes. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // AWS tags that will be added to the ASG and EC2 instances + // AWS tags that will be added to the ASG and EC2 instances. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Kubernetes taints that will be added to the nodes + // Kubernetes taints that will be added to the nodes. Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` // The type of Node Pool, can be `self-managed` for using customization like @@ -1581,10 +1727,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { // Protocol corresponds to the JSON schema field "protocol". Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // Tags corresponds to the JSON schema field "tags". + // Additional AWS tags for the Firewall rule. 
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Type corresponds to the JSON schema field "type". + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1595,6 +1742,7 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" ) +// Port range for the Firewall Rule. type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { // From corresponds to the JSON schema field "from". From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` @@ -1604,22 +1752,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { } type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule + // The name of the Firewall rule. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // If true, the source will be the security group itself + // If `true`, the source will be the security group itself. Self bool `json:"self" yaml:"self" mapstructure:"self"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. 
Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1631,22 +1780,23 @@ const ( ) type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule + // The name for the additional Firewall rule Security Group. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // The source security group ID + // The source security group ID. SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1657,9 +1807,11 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" ) +// Optional additional firewall rules that will be attached to the nodes. type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. 
See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` // Self corresponds to the JSON schema field "self". @@ -1715,19 +1867,23 @@ const ( ) type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - // If true, the nodes will be created as spot instances + // If `true`, the nodes will be created as spot instances. Default is `false`. Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - // The instance type to use for the nodes + // The instance type to use for the nodes. Type string `json:"type" yaml:"type" mapstructure:"type"` - // The size of the disk in GB + // The size of the disk in GB. VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - // VolumeType corresponds to the JSON schema field "volumeType". + // Volume type for the instance disk. Default is `gp2`. VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } @@ -1741,10 +1897,10 @@ const ( ) type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool + // The maximum number of nodes in the node pool. Max int `json:"max" yaml:"max" mapstructure:"max"` - // The minimum number of nodes in the node pool + // The minimum number of nodes in the node pool. 
Min int `json:"min" yaml:"min" mapstructure:"min"` } @@ -1837,24 +1993,26 @@ type SpecToolsConfigurationTerraform struct { State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } +// Configuration for storing the Terraform state of the cluster. type SpecToolsConfigurationTerraformState struct { // S3 corresponds to the JSON schema field "s3". S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } +// Configuration for the S3 bucket used to store the Terraform state. type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states + // This value defines which bucket will be used to store all the states. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // This value defines which folder will be used to store all the states inside the - // bucket + // bucket. KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - // This value defines in which region the bucket is located + // This value defines in which region the bucket is located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region + // Terraform, useful when using a bucket in a recently added region. SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } @@ -2006,29 +2164,29 @@ var enumValues_SpecDistributionModulesPolicyType = []interface{}{ } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) } - type Plain SpecDistributionModules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) } - *j = SpecDistributionModules(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } @@ -2440,14 +2598,6 @@ var enumValues_SpecDistributionModulesLoggingType = []interface{}{ "customOutputs", } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string @@ -2486,6 +2636,12 @@ func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error return nil } +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string @@ -3245,22 +3401,22 @@ const ( ) // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesAwsRegion { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = TypesAwsRegion(v) + *j = SpecKubernetesLogsTypesElem(v) return nil } @@ -3344,22 +3500,22 @@ type TypesAwsSubnetId string type TypesKubeTaints []string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesLoggingType(v) return nil } @@ -3484,58 +3640,73 @@ type TypesFuryModuleComponentOverridesWithIAMRoleName struct { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesAuth(plain) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecDistributionModulesAuthProvider + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + } + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + } + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecKubernetesAwsAuthUser(plain) return nil } @@ -3594,62 +3765,61 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionModulesMonitoringType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesAuthOverridesIngress + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + } + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") } - type Plain SpecDistributionModulesAuthDex + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecKubernetesAPIServer(plain) return nil } @@ -3662,42 +3832,44 @@ type TypesFuryModuleComponentOverrides struct { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } type TypesAwsS3KeyPrefix string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = TypesKubeTolerationOperator_1(v) return nil } @@ -3750,20 +3922,26 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + } + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } @@ -3806,22 +3984,22 @@ func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } @@ -3865,62 +4043,44 @@ var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") } - type Plain TypesKubeToleration + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTolerationOperator(v) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } @@ -3929,31 +4089,24 @@ var enumValues_TypesKubeTolerationOperator = []interface{}{ "Equal", } -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - *j = TypesKubeTolerationEffect(v) + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } @@ -4139,20 +4292,20 @@ type TypesSshPubKey string type TypesUri string // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionCommonProvider + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCommonProvider(plain) + *j = SpecDistributionModulesTracing(plain) return nil } diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index e1a3f89cc..b5b6d4032 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -10,6 +10,7 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) +// KFD modules deployed on top of an existing Kubernetes cluster. type KfddistributionKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -29,7 +30,8 @@ type KfddistributionKfdV1Alpha2Kind string const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -37,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". 
Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Plugins corresponds to the JSON schema field "plugins". @@ -51,36 +55,45 @@ type SpecDistribution struct { // CustomPatches corresponds to the JSON schema field "customPatches". CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` - // The kubeconfig file path + // The path to the kubeconfig file. Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` // Modules corresponds to the JSON schema field "modules". Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). 
// // NOTE: If plugins are pulling from the default registry, the registry will be // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -279,8 +292,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, + // Dex). Notice that when nginx type is dual, these will use the `external` + // ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". 
@@ -296,11 +312,25 @@ type SpecDistributionModulesAuth struct {
 Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 }
 
+// Configuration for the Dex package.
 type SpecDistributionModulesAuthDex struct {
- // The additional static clients for dex
+ // Additional static clients definitions that will be added to the default clients
+ // included with the distribution in Dex's configuration. Example:
+ //
+ // ```yaml
+ // additionalStaticClients:
+ // - id: my-custom-client
+ // name: "A custom additional static client"
+ // redirectURIs:
+ // - "https://myapp.tld/redirect"
+ // - "https://alias.tld/oidc-callback"
+ // secret: supersecretpassword
+ // ```
+ // Reference: https://dexidp.io/docs/connectors/local/
 AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
 
- // The connectors for dex
+ // A list with each item defining a Dex connector. Follows Dex connectors
+ // configuration format: https://dexidp.io/docs/connectors/
 Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
 
 // Expiry corresponds to the JSON schema field "expiry".
@@ -318,22 +348,25 @@ type SpecDistributionModulesAuthDexExpiry struct {
 SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
 }
 
+// Override the common configuration with a particular configuration for the Auth
+// module.
 type SpecDistributionModulesAuthOverrides struct {
 // Ingresses corresponds to the JSON schema field "ingresses".
 Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
 
- // The node selector to use to place the pods for the auth module
+ // Set to override the node selector used to place the pods of the Auth module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } @@ -461,15 +494,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. 
Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -481,11 +522,16 @@ const ( SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -499,6 +545,7 @@ const ( SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) +// Configuration for the Velero package. type SpecDistributionModulesDrVelero struct { // The storage backend type for Velero. `minio` will use an in-cluster MinIO // deployment for object storage, `externalEndpoint` can be used to point to an @@ -602,24 +649,31 @@ type SpecDistributionModulesDrVeleroSnapshotController struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If using the + // nginx dual type, this value should be the same as the domain associated with + // the `internal` ingress class. 
BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". 
ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -628,17 +682,21 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // List of challenge solvers to use instead of the default one for the `http01` + // challenge. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***http01*** + // The type of the clusterIssuer. Only `http01` challenge is supported for + // KFDDistribution kind. See solvers for arbitrary configurations. Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -658,14 +716,24 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. 
+ // - `single`: a single ingress controller with ingress class `nginx` will be
+ // installed to manage all the ingress resources, infrastructural ingresses will
+ // be created.
+ // - `dual`: two independent ingress controllers will be installed, one for the
+ // `internal` ingress class intended for private ingresses and one for the
+ // `external` ingress class intended for public ingresses. KFD infrastructural
+ // ingresses will use the `internal` ingress class when using the dual type.
+ //
+ // Default is `single`.
 Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
 type SpecDistributionModulesIngressNginxTLS struct {
- // The provider of the TLS certificate, must be ***none***, ***certManager*** or
- // ***secret***
+ // The provider of the TLS certificates for the ingresses, one of: `none`,
+ // `certManager`, or `secret`.
 Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 
 // Secret corresponds to the JSON schema field "secret".
@@ -680,15 +748,18 @@ const (
 SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
 )
 
+// Kubernetes TLS secret for the ingresses TLS certificate.
 type SpecDistributionModulesIngressNginxTLSSecret struct {
- // Ca corresponds to the JSON schema field "ca".
+ // The Certificate Authority certificate file's content. You can use the
+ // `"{file://}"` notation to get the content from a file.
 Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
 
- // The certificate file content or you can use the file notation to get the
- // content from a file
+ // The certificate file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
 Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
 
- // Key corresponds to the JSON schema field "key".
+ // The signing key file's content. You can use the `"{file://}"` notation to
+ // get the content from a file.
Key string `json:"key" yaml:"key" mapstructure:"key"` } @@ -700,14 +771,17 @@ const ( SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) +// Override the common configuration with a particular configuration for the +// Ingress module. type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the ingress module + // Set to override the tolerations that will be added to the pods of the Ingress + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -716,6 +790,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } +// Configuration for the Logging module. type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` @@ -738,79 +813,87 @@ type SpecDistributionModulesLogging struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // selects the logging stack. Choosing none will disable the centralized logging. 
- // Choosing opensearch will deploy and configure the Logging Operator and an
+ // Selects the logging stack. Options are:
+ // - `none`: will disable the centralized logging.
+ // - `opensearch`: will deploy and configure the Logging Operator and an
 // OpenSearch cluster (can be single or triple for HA) where the logs will be
- // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
- // for storage. Choosing customOuput the Logging Operator will be deployed and
- // installed but with no local storage, you will have to create the needed Outputs
- // and ClusterOutputs to ship the logs to your desired storage.
+ // stored.
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+ // - `customOutputs`: the Logging Operator will be deployed and installed but with
+ // no local storage, you will have to create the needed Outputs and ClusterOutputs
+ // to ship the logs to your desired storage.
+ //
+ // Default is `opensearch`.
 Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
+// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.
 type SpecDistributionModulesLoggingCerebro struct {
 // Overrides corresponds to the JSON schema field "overrides".
 Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 }
 
-// when using the customOutputs logging type, you need to manually specify the spec
-// of the several Output and ClusterOutputs that the Logging Operator expects to
-// forward the logs collected by the pre-defined flows.
+// When using the `customOutputs` logging type, you need to manually specify the
+// spec of the several `Output` and `ClusterOutputs` that the Logging Operator
+// expects to forward the logs collected by the pre-defined flows.
 type SpecDistributionModulesLoggingCustomOutputs struct {
- // This value defines where the output from Flow will be sent.
Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. 
Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+ // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". 
@@ -836,23 +919,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -860,15 +945,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". 
RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -879,10 +964,11 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -893,6 +979,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1582,6 +1669,13 @@ func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) e return nil } +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string @@ -1669,6 +1763,61 @@ func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b return nil } +type SpecDistributionModulesMonitoringAlertManager struct { + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. + DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` + + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. + InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` + + // The Slack webhook URL where to send the infrastructural and workload alerts to. + SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +} + +type SpecDistributionModulesMonitoringBlackboxExporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringGrafana struct { + // Setting this to true will deploy an additional `grafana-basic-auth` ingress + // protected with Grafana's basic auth instead of SSO. 
Its intended use is as a
+ // temporary ingress for when there are problems with the SSO login flow.
+ //
+ // Notice that by default anonymous access is enabled.
+ BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+ // role. Example:
+ //
+ // ```yaml
+ // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+ // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+ // 'Viewer'
+ // ```
+ //
+ // More details in [Grafana's
+ // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+ UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringMimirBackend string
+
+var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{
+ "minio",
+ "externalEndpoint",
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
@@ -2133,22 +2282,22 @@ func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } @@ -2769,6 +2918,81 @@ type TypesIpAddress string type TypesKubeLabels_1 map[string]string +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} +type TypesEnvRef string + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTaintsEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} + +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) + +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeTaints: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeTaints: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeTaints: required") + } + type Plain TypesKubeTaints + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeTaints(plain) + return nil +} +type TypesFileRef string + +type TypesIpAddress string + +type TypesKubeLabels_1 map[string]string + type TypesKubeTaints []string type TypesSemVer string diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 88946d9ca..b3f3b16ed 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -16,6 +16,7 @@ type Metadata struct { Name string `json:"name" yaml:"name" mapstructure:"name"` } +// A KFD Cluster deployed on top of a set of existing VMs. type OnpremisesKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -40,7 +41,7 @@ type Spec struct { // Defines which KFD version will be installed and, in consequence, the Kubernetes // version used to create the cluster. It supports git tags and branches. Example: - // v1.30.1. + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Kubernetes corresponds to the JSON schema field "kubernetes". 
@@ -68,7 +69,7 @@ type SpecDistributionCommon struct { NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` // The node selector to use to place the pods for all the KFD modules. Follows - // Kubernetes selector format. Example: `node.kubernetes.io/role: infra` + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". @@ -76,6 +77,9 @@ type SpecDistributionCommon struct { // URL of the registry where to pull images from for the Distribution phase. // (Default is `registry.sighup.io/fury`). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` // The relative path to the vendor directory, does not need to be changed. @@ -547,6 +551,8 @@ type SpecDistributionModulesAuthProvider struct { // and require authentication before accessing them. // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } @@ -575,6 +581,8 @@ type SpecDistributionModulesDr struct { // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` // disables the module and `on-premises` will install Velero and an optional MinIO // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". 
@@ -709,7 +717,7 @@ type SpecDistributionModulesIngress struct { // If corresponds to the JSON schema field "if". If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"` - // Configurations for the nginx ingress controller package. + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". @@ -737,7 +745,7 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer + // Name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` // List of challenge solvers to use instead of the default one for the `http01` @@ -765,7 +773,7 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, options are: + // The type of the Ingress nginx controller, options are: // - `none`: no ingress controller will be installed and no infrastructural // ingresses will be created. // - `single`: a single ingress controller with ingress class `nginx` will be @@ -775,6 +783,8 @@ type SpecDistributionModulesIngressNginx struct { // `internal` ingress class intended for private ingresses and one for the // `external` ingress class intended for public ingresses. KFD infrastructural // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. 
Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } @@ -824,11 +834,11 @@ type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // Set to override the node selector used to place the pods of the Ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Set to override the tolerations that will be added to the pods of the Ingress - // module + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -869,10 +879,12 @@ type SpecDistributionModulesLogging struct { // - `customOuputs`: the Logging Operator will be deployed and installed but with // no local storage, you will have to create the needed Outputs and ClusterOutputs // to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } -// DEPRECATED in latest versions of KFD. +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1083,6 +1095,8 @@ type SpecDistributionModulesMonitoring struct { // storing them locally in the cluster. // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. 
Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1091,7 +1105,7 @@ type SpecDistributionModulesMonitoring struct { type SpecDistributionModulesMonitoringAlertManager struct { // The webhook URL to send dead man's switch monitoring, for example to use with - // healthchecks.io + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` // Set to false to avoid installing the Prometheus rules (alerts) included with diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 538188105..5e6c07b26 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." 
}, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -279,6 +283,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { "network": { @@ -424,6 +429,7 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { @@ -916,6 +922,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -1183,7 +1190,8 @@ "description": "Configurations for the nginx ingress controller module" }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." 
}, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1280,6 +1288,7 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" @@ -1326,7 +1335,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -1350,7 +1359,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." 
}, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1375,16 +1384,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1457,6 +1469,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1513,6 +1526,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1525,7 +1539,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. 
Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1623,6 +1637,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1633,6 +1648,7 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", @@ -1659,10 +1675,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1671,26 +1689,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." 
      }
    }
  },
@@ -1768,7 +1787,7 @@
    "Spec.Distribution.Modules.Monitoring": {
      "type": "object",
      "additionalProperties": false,
-      "description": "configuration for the Monitoring module components",
+      "description": "Configuration for the Monitoring module.",
      "properties": {
        "type": {
          "type": "string",
@@ -1778,7 +1797,7 @@
            "prometheusAgent",
            "mimir"
          ],
-          "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage."
+          "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`."
        },
        "overrides": {
          "$ref": "#/$defs/Types.FuryModuleOverrides"
        },
@@ -1863,7 +1882,7 @@
      "type": "object",
      "additionalProperties": false,
      "properties": {
        "deadManSwitchWebhookUrl": {
          "type": "string",
-          "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io"
+          "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io."
        },
        "installDefaultRules": {
          "type": "boolean",
          "description": "If true, the default rules will be installed"
        },
        "slackWebhookUrl": {
          "type": "string",
-          "description": "The slack webhook url to send alerts"
+          "description": "The Slack webhook URL where to send the infrastructural and workload alerts to."
        }
      }
    },
@@ -1924,10 +1946,11 @@
    "Spec.Distribution.Modules.Monitoring.Mimir": {
      "type": "object",
      "additionalProperties": false,
+      "description": "Configuration for the Mimir package.",
      "properties": {
        "retentionTime": {
          "type": "string",
-          "description": "The retention time for the mimir pods"
+          "description": "The retention time for the logs stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days."
        },
        "backend": {
          "type": "string",
          "enum": [
            "minio",
            "externalEndpoint"
          ],
-          "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***"
+          "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1970,11 +1994,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1982,11 +2007,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1998,6 +2023,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2070,6 +2096,7 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { @@ -2098,9 +2125,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -2125,6 +2153,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2136,7 +2165,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2182,6 +2211,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2197,11 +2227,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. 
`deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2215,13 +2245,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -2229,11 +2260,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." 
      },
      "overrides": {
        "$ref": "#/$defs/Types.FuryModuleComponentOverrides"
@@ -2373,6 +2404,7 @@
    "Spec.Distribution.Modules.Auth": {
      "type": "object",
      "additionalProperties": false,
+      "description": "Configuration for the Auth module.",
      "properties": {
        "overrides": {
          "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides"
@@ -2526,6 +2558,7 @@
    "Spec.Distribution.Modules.Auth.Provider.BasicAuth": {
      "type": "object",
      "additionalProperties": false,
+      "description": "Configuration for the HTTP Basic Auth provider.",
      "properties": {
        "username": {
          "type": "string",
@@ -2872,6 +2905,41 @@
-    "Types.FuryModuleOverrides": {
+    "Types.KubeResources": {
       "type": "object",
       "additionalProperties": false,
+      "properties": {
+        "requests": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "cpu": {
+              "type": "string",
+              "description": "The CPU request for the Pod, in cores. Example: `500m`."
+            },
+            "memory": {
+              "type": "string",
+              "description": "The memory request for the Pod. Example: `500M`."
+            }
+          }
+        },
+        "limits": {
+          "type": "object",
+          "additionalProperties": false,
+          "properties": {
+            "cpu": {
+              "type": "string",
+              "description": "The CPU limit for the Pod. Example: `1000m`."
+            },
+            "memory": {
+              "type": "string",
+              "description": "The memory limit for the Pod. Example: `1G`."
+            }
+          }
+        }
+      }
+    },
+    "Types.FuryModuleOverrides": {
+      "type": "object",
+      "description": "Override the common configuration with a particular configuration for the module.",
+      "additionalProperties": false,
       "properties": {
         "nodeSelector": {
           "$ref": "#/$defs/Types.KubeNodeSelector",
@@ -2944,11 +3012,11 @@
      "properties": {
        "disableAuth": {
          "type": "boolean",
-          "description": "If true, the ingress will not have authentication"
+          "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth."
        },
        "host": {
          "type": "string",
-          "description": "The host of the ingress"
+          "description": "Use this host for the ingress instead of the default one."
}, "ingressClass": { "type": "string", diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 7c0f91e64..dae6fd51e 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." 
}, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -155,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -167,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." }, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." 
} }, "required": [ @@ -196,12 +202,10 @@ "additionalProperties": false, "properties": { "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, "allOf": [ @@ -279,6 +283,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { "network": { @@ -295,7 +300,7 @@ "properties": { "cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" + "description": "The network CIDR for the VPC that will be created" }, "subnetsCidrs": { "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" @@ -308,6 +313,7 @@ }, "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", "additionalProperties": false, "properties": { "private": { @@ -315,14 +321,14 @@ "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "description": "Network CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, "public": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "description": "Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be 
created" } }, "required": [ @@ -332,50 +338,51 @@ }, "Spec.Infrastructure.Vpn": { "type": "object", + "description": "Configuration for the VPN server instances.", "additionalProperties": false, "properties": { "instances": { "type": "integer", - "description": "The number of instances to create, 0 to skip the creation" + "description": "The number of VPN server instances to create, `0` to skip the creation." }, "port": { "$ref": "#/$defs/Types.TcpPort", - "description": "The port used by the OpenVPN server" + "description": "The port where each OpenVPN server will listen for connections." }, "instanceType": { "type": "string", - "description": "The size of the AWS EC2 instance" + "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`." }, "diskSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB." }, "operatorName": { "type": "string", - "description": "The username of the account to create in the bastion's operating system" + "description": "The username of the account to create in the bastion's operating system." }, "dhParamsBits": { "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file." }, "vpnClientsSubnetCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected." 
}, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -399,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -407,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." }, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. 
Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -424,33 +431,34 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." }, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created." 
}, "apiServer": { "$ref": "#/$defs/Spec.Kubernetes.APIServer" }, "serviceIpV4Cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." }, "nodeAllowedSshPublicKey": { "anyOf": [ @@ -461,7 +469,7 @@ "$ref": "#/$defs/Types.FileRef" } ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file." }, "nodePoolsLaunchKind": { "type": "string", @@ -470,7 +478,7 @@ "launch_configurations", "launch_templates", "both" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, "nodePoolGlobalAmiType": { "type": "string", @@ -482,7 +490,32 @@ }, "logRetentionDays": { "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. 
Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] }, "logsTypes": { "type": "array", @@ -522,7 +555,7 @@ "properties": { "privateAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`." }, "privateAccessCidrs": { "type": "array", @@ -530,7 +563,7 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server." }, "publicAccessCidrs": { "type": "array", @@ -538,11 +571,11 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server." }, "publicAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`." } }, "required": [ @@ -553,6 +586,7 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. 
It is recommended to use `self-managed`.", @@ -575,7 +609,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -588,26 +622,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." }, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -717,21 +751,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." 
}, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -740,7 +776,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -754,12 +791,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -770,6 +807,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -777,7 +815,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." 
}, "sourceSecurityGroupId": { "type": "array", @@ -804,13 +843,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -840,7 +881,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -848,19 +889,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -880,7 +921,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -888,19 +929,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." 
}, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -916,6 +957,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -932,6 +974,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -939,21 +982,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." 
} } }, @@ -1090,28 +1133,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." } } }, @@ -1121,7 +1165,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -1176,14 +1220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1275,20 +1320,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -1321,7 +1367,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" }, @@ -1345,7 +1391,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" }, @@ -1370,16 +1416,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1391,6 +1440,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1406,15 +1456,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", @@ -1422,11 +1473,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." 
}, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." } }, "required": [ @@ -1448,6 +1499,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1467,11 +1519,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -1481,15 +1533,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." 
} }, "required": [ @@ -1500,6 +1553,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" }, @@ -1512,7 +1566,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" }, @@ -1591,14 +1645,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1610,6 +1664,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1620,10 +1675,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1631,11 +1687,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1646,10 +1702,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1658,26 +1716,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." 
}, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1697,6 +1756,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1704,41 +1764,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1755,7 +1815,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1765,7 +1825,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1811,15 +1871,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1852,15 +1912,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1911,10 +1971,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1922,31 +1983,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." 
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1957,11 +2019,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1969,11 +2032,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1985,6 +2048,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1995,7 +2059,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -2011,10 +2075,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -2022,31 +2087,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -2057,11 +2123,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -2069,11 +2136,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -2085,9 +2152,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -2106,6 +2174,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2117,7 +2186,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2163,6 +2232,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2178,11 +2248,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2196,13 +2266,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -2210,11 +2281,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2228,6 +2299,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2238,7 +2310,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." 
}, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2334,12 +2406,12 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." } }, "required": [ @@ -2350,6 +2422,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -2359,7 +2432,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2438,10 +2511,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2451,10 +2525,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." 
}, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2467,11 +2542,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -2490,7 +2565,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2503,14 +2578,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." 
} }, "required": [ @@ -2524,14 +2600,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2810,11 +2887,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2824,11 +2901,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." 
} } } @@ -2836,11 +2913,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2850,7 +2928,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2866,7 +2944,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2876,7 +2954,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2886,7 +2964,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2896,7 +2974,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." 
}, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2909,15 +2987,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index cd7c39b75..80cf9d6b9 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "KFD modules deployed on top of an existing Kubernetes cluster.", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,6 +49,7 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "distribution": { @@ -68,7 +70,7 @@ "properties": { "kubeconfig": { "type": "string", - "description": "The kubeconfig file path" + "description": "The path to the kubeconfig file." 
}, "common": { "$ref": "#/$defs/Spec.Distribution.Common" @@ -134,28 +136,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -165,7 +168,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -217,14 +220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "forecastle": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" @@ -258,20 +262,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -304,7 +309,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -328,7 +333,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -353,16 +358,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -374,6 +382,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -389,26 +398,27 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", "enum": [ "http01" ], - "description": "The type of the cluster issuer, must be ***http01***" + "description": "The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations." }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." 
} }, "required": [ @@ -431,6 +441,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -443,7 +454,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -522,14 +533,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -541,6 +552,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -551,10 +563,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -562,11 +575,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -577,10 +590,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -589,26 +604,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." 
}, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -628,6 +644,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -635,41 +652,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -686,7 +703,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -696,7 +713,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -742,15 +759,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the K8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -783,15 +800,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -842,10 +859,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -853,31 +871,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "External S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -888,11 +907,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -900,11 +920,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -916,6 +936,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -926,7 +947,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -942,10 +963,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -953,31 +975,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "External S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -988,11 +1011,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1000,11 +1024,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1016,6 +1040,7 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1033,7 +1058,7 @@ "calico", "cilium" ], - "description": "The type of networking to use, either ***none***, ***calico*** or ***cilium***" + "description": "The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`." 
} }, "required": [ @@ -1070,10 +1095,12 @@ "additionalProperties": false, "properties": { "podCidr": { - "$ref": "#/$defs/Types.Cidr" + "$ref": "#/$defs/Types.Cidr", + "description": "Allows specifing a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`." }, "maskSize": { - "type": "string" + "type": "string", + "description": "The mask size to use for the Pods network on each node." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1087,6 +1114,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1098,7 +1126,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1144,6 +1172,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -1159,11 +1188,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." 
}, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1177,13 +1206,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -1191,11 +1221,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1209,6 +1239,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1219,7 +1250,7 @@ "none", "on-premises" ], - "description": "The type of the DR, must be ***none*** or ***on-premises***" + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1245,6 +1276,7 @@ "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Velero package.", "properties": { "backend": { "type": "string", @@ -1352,6 +1384,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -1361,7 +1394,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." 
}, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1440,10 +1473,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -1453,7 +1487,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", @@ -1469,11 +1503,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -1492,7 +1526,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." 
}, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -1505,14 +1539,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -1526,14 +1561,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. 
Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -1597,11 +1633,29 @@ } }, "Types.KubeTaints": { - "type": "array", - "items": { - "type": "string", - "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=(\\w+):(NoSchedule|PreferNoSchedule|NoExecute)$" - } + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "effect", + "key", + "value" + ] }, "Types.KubeNodeSelector": { "type": [ @@ -1667,11 +1721,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -1681,11 +1735,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." 
} } } @@ -1693,11 +1747,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the security module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -1707,7 +1762,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -1723,7 +1778,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -1733,7 +1788,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -1743,15 +1798,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
} } } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 44af1db96..26c3f87fc 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "A KFD Cluster deployed on top of a set of existing VMs.", "type": "object", "properties": { "apiVersion": { @@ -49,7 +49,7 @@ "properties": { "distributionVersion": { "type": "string", - "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1.", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "kubernetes": { @@ -708,7 +708,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", @@ -726,7 +726,7 @@ }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." 
}, "networkPoliciesEnabled": { "type": "boolean", @@ -796,7 +796,7 @@ }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller package." + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", @@ -841,14 +841,14 @@ }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "Set to override the node selector used to place the pods of the Ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "Set to override the tolerations that will be added to the pods of the Ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -881,7 +881,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type." 
+ "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -974,7 +974,7 @@ "properties": { "name": { "type": "string", - "description": "Name of the clusterIssuer" + "description": "Name of the clusterIssuer." }, "email": { "type": "string", @@ -1026,7 +1026,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. 
Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1124,7 +1124,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", - "description": "DEPRECATED in latest versions of KFD.", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1285,7 +1285,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1372,7 +1372,7 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", @@ -1519,7 +1519,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment." + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1677,7 +1677,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`." 
+ "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1801,7 +1801,7 @@ "none", "on-premises" ], - "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment." + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2100,7 +2100,7 @@ "basicAuth", "sso" ], - "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication." + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2357,11 +2357,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the loki pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the prometheus pods" + "description": "The memory request for the Pod. Example: `500M`." 
} } }, @@ -2371,11 +2371,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the prometheus pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index f823ad075..3dd175a5d 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -146,7 +146,7 @@ spec: # to: 80 # # Additional AWS tags # tags: {} - # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more informations + # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more information. awsAuth: {} # additionalAccounts: # - "777777777777" @@ -212,7 +212,7 @@ spec: # - http01: # ingress: # class: nginx - # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission + # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission. 
dns: # the public DNS zone definition public: From 1387e336e340408aa74e53e6f97e4b26d7b56263 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 20 Nov 2024 17:18:11 +0100 Subject: [PATCH 117/160] docs(schemas): apply suggestions from code review Co-authored-by: Riccardo Cannella --- schemas/public/ekscluster-kfd-v1alpha2.json | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index dae6fd51e..0ceb30def 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -321,14 +321,14 @@ "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "Network CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, "public": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ @@ -478,7 +478,7 @@ "launch_templates", "both" ], - "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." 
}, "nodePoolGlobalAmiType": { "type": "string", @@ -1460,7 +1460,7 @@ "properties": { "name": { "type": "string", - "description": "Name of the clusterIssuer." + "description": "The name of the clusterIssuer." }, "email": { "type": "string", @@ -1477,7 +1477,7 @@ }, "solvers": { "type": "array", - "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -1566,7 +1566,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed without local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." 
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1825,7 +1825,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1992,7 +1992,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Mimir's storage." + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", @@ -2096,7 +2096,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Tempo's storage." + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", @@ -2432,7 +2432,7 @@ }, "baseDomain": { "type": "string", - "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." 
}, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" From 3a4c9738ab0e2c7e1bdcc769d432ad1a31eac34a Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 20 Nov 2024 17:43:27 +0100 Subject: [PATCH 118/160] docs(schemas): port suggestions to the other schemas --- schemas/public/ekscluster-kfd-v1alpha2.json | 6 +++--- .../public/kfddistribution-kfd-v1alpha2.json | 17 +++++++++-------- schemas/public/onpremises-kfd-v1alpha2.json | 18 +++++++++--------- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 0ceb30def..f42c465e4 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1155,7 +1155,7 @@ }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -1220,7 +1220,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." + "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", @@ -1566,7 +1566,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. 
Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed without local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 80cf9d6b9..382187e1a 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -220,7 +220,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class." + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class." 
}, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", @@ -402,7 +402,7 @@ "properties": { "name": { "type": "string", - "description": "Name of the clusterIssuer." + "description": "The name of the clusterIssuer." }, "email": { "type": "string", @@ -418,7 +418,7 @@ }, "solvers": { "type": "array", - "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -454,7 +454,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." 
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -713,7 +713,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -880,7 +880,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Mimir's storage." + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", @@ -984,7 +984,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Tempo's storage." + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", @@ -1394,7 +1394,7 @@ }, "baseDomain": { "type": "string", - "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." 
}, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1491,6 +1491,7 @@ }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 26c3f87fc..f9852eb91 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -792,7 +792,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class." + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", @@ -974,7 +974,7 @@ "properties": { "name": { "type": "string", - "description": "Name of the clusterIssuer." + "description": "The name of the clusterIssuer." }, "email": { "type": "string", @@ -990,7 +990,7 @@ }, "solvers": { "type": "array", - "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -1026,7 +1026,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. 
Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1285,7 +1285,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1452,7 +1452,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Mimir's storage." + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", @@ -1556,7 +1556,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Tempo's storage." + "description": "The external S3-compatible endpoint for Tempo's storage." 
}, "insecure": { "type": "boolean", @@ -1629,7 +1629,7 @@ "calico", "cilium" ], - "description": "The type of CNI plugin to use, either `calico` (default, via the Tigera Operator) or `cilium`." + "description": "The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`." } }, "required": [ @@ -1945,7 +1945,7 @@ }, "baseDomain": { "type": "string", - "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" From 7ef4a82230b054ab6301b3aaa0f06b5aa4acdf07 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 20 Nov 2024 17:43:27 +0100 Subject: [PATCH 119/160] docs(schemas): port suggestions to the other schemas --- docs/schemas/ekscluster-kfd-v1alpha2.md | 28 +- docs/schemas/kfddistribution-kfd-v1alpha2.md | 24 +- docs/schemas/onpremises-kfd-v1alpha2.md | 22 +- .../ekscluster/v1alpha2/private/schema.go | 68 +- pkg/apis/ekscluster/v1alpha2/public/schema.go | 58 +- .../kfddistribution/v1alpha2/public/schema.go | 201 +++++- pkg/apis/onpremises/v1alpha2/public/schema.go | 551 ++++++++++++---- schemas/private/ekscluster-kfd-v1alpha2.json | 604 ++++++++++++------ 8 files changed, 1133 insertions(+), 423 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index e028f2a70..453eb6cdf 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -132,6 +132,8 @@ The provider type. Don't set. FOR INTERNAL USE ONLY. URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). 
+NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. + ## .spec.distribution.common.relativeVendorPath ### Description @@ -551,7 +553,7 @@ Configuration for the Auth module. ### Description -Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -1733,7 +1735,7 @@ Whether to install or not the default `manifests` and `full` backups schedules. ### Description -The base domain used for all the KFD ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. +The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. ## .spec.distribution.modules.ingress.certManager @@ -1773,13 +1775,13 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer. +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -List of challenge solvers to use instead of the default one for the `http01` challenge. +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type @@ -3032,8 +3034,8 @@ The value of the toleration Selects the logging stack. Options are: - `none`: will disable the centralized logging. 
- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. -- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. -- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. Default is `opensearch`. @@ -3401,7 +3403,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Mimir's storage. +The external S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure @@ -3828,8 +3830,8 @@ The memory request for the Pod. Example: `500M`. The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. -- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. 
+- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. Default is `prometheus`. @@ -4651,7 +4653,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Tempo's storage. +The external S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure @@ -4826,7 +4828,7 @@ Network CIDRS configuration for private and public subnets. ### Description -Network CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created +The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created ### Constraints @@ -4842,7 +4844,7 @@ Network CIRDs for the private subnets, where the nodes, the pods, and the privat ### Description -Network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created +The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created ### Constraints @@ -5771,7 +5773,7 @@ The type of Node Pool, can be `self-managed` for using customization like custom ### Description -Accepted values are `launch_configurations`, `launch_templates` or `both`. 
For new clusters use `launch_templates`, for adopting existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. +Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. ### Constraints diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 095a35e79..8b950ab96 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -554,7 +554,7 @@ Configuration for the Auth module. ### Description -Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -696,6 +696,10 @@ Override the common configuration with a particular configuration for the Auth m ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description @@ -1344,7 +1348,7 @@ Whether to install or not the snapshotController component in the cluster. Befor ### Description -The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class. +The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class. 
## .spec.distribution.modules.ingress.certManager @@ -1384,13 +1388,13 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer. +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -List of challenge solvers to use instead of the default one for the `http01` challenge. +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type @@ -2517,8 +2521,8 @@ The value of the toleration Selects the logging stack. Options are: - `none`: will disable the centralized logging. - `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. -- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. -- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. Default is `opensearch`. @@ -2886,7 +2890,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Mimir's storage. +The external S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure @@ -3313,8 +3317,8 @@ The memory request for the Pod. Example: `500M`. 
The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. -- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. +- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. Default is `prometheus`. @@ -4251,7 +4255,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Tempo's storage. +The external S3-compatible endpoint for Tempo's storage. 
## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index f3b0f827a..d4c4437c1 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -556,7 +556,7 @@ Configuration for the Auth module. ### Description -Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -1470,7 +1470,7 @@ Whether to install or not the snapshotController component in the cluster. Befor ### Description -The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class. +The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class. ## .spec.distribution.modules.ingress.certManager @@ -1510,13 +1510,13 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer. +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -List of challenge solvers to use instead of the default one for the `http01` challenge. +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. 
## .spec.distribution.modules.ingress.certManager.clusterIssuer.type @@ -2645,8 +2645,8 @@ The value of the toleration Selects the logging stack. Options are: - `none`: will disable the centralized logging. - `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. -- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. -- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. Default is `opensearch`. @@ -3014,7 +3014,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Mimir's storage. +The external S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure @@ -3441,8 +3441,8 @@ The memory request for the Pod. Example: `500M`. The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. -- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. +- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. Default is `prometheus`. @@ -3792,7 +3792,7 @@ The value of the toleration ### Description -The type of CNI plugin to use, either `calico` (default, via the Tigera Operator) or `cilium`. +The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`. ### Constraints @@ -4378,7 +4378,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Tempo's storage. +The external S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index b4117edb8..8fa711d7c 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -82,6 +82,9 @@ type SpecDistributionCommon struct { // URL of the registry where to pull images from for the Distribution phase. // (Default is `registry.sighup.io/fury`). 
+ // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` // The relative path to the vendor directory, does not need to be changed. @@ -341,7 +344,9 @@ type SpecDistributionModulesAuthDexExpiry struct { } type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -911,9 +916,9 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If in the nginx + // `dual` configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` // CertManager corresponds to the JSON schema field "certManager". @@ -947,13 +952,16 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email of the cluster issuer Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` // Route53 corresponds to the JSON schema field "route53". 
Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` - // The custom solvers configurations + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` // The type of the cluster issuer, must be ***dns01*** or ***http01*** @@ -1321,10 +1329,14 @@ type SpecDistributionModulesIngress struct { // selects the logging stack. Choosing none will disable the centralized logging. // Choosing opensearch will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. + // stored. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1533,16 +1545,18 @@ type SpecDistributionModulesMonitoring struct { // - `none`: will disable the whole monitoring stack. 
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1626,7 +1640,7 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Mimir's storage. 
+ // The external S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. @@ -2084,7 +2098,7 @@ type SpecDistributionModulesTracingTempoExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Tempo's storage. + // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. @@ -2123,11 +2137,11 @@ type SpecInfrastructureVpcNetwork struct { } type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // These are the CIRDs for the private subnets, where the nodes, the pods, and the + // The network CIDRs for the private subnets, where the nodes, the pods, and the // private load balancers will be created Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - // These are the CIDRs for the public subnets, where the public load balancers and + // The network CIDRs for the public subnets, where the public load balancers and // the VPN servers will be created Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } @@ -2208,12 +2222,10 @@ type SpecKubernetes struct { // pools unless overridden by a specific node pool. NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` - // Ingress corresponds to the JSON schema field "ingress". - Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` - - // Either `launch_configurations`, `launch_templates` or `both`. 
For new clusters - // use `launch_templates`, for existing cluster you'll need to migrate from - // `launch_configurations` to `launch_templates` using `both` as interim. + // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`, for adopting an existing cluster you'll + // need to migrate from `launch_configurations` to `launch_templates` using `both` + // as interim. NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` // This value defines the CIDR that will be used to assign IP addresses to the diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index e44fe614d..665c9645d 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -87,6 +87,9 @@ type SpecDistributionCommon struct { // URL of the registry where to pull images from for the Distribution phase. // (Default is `registry.sighup.io/fury`). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` // The relative path to the vendor directory, does not need to be changed. @@ -308,9 +311,9 @@ type SpecDistributionModules struct { // Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, - // Dex). Notice that when nginx type is dual, these will use the `external` - // ingress class. + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. 
BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -669,8 +672,8 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { } type SpecDistributionModulesIngress struct { - // The base domain used for all the KFD ingresses. If in the nginx `dual` - // configuration type, this value should be the same as the + // The base domain used for all the KFD infrastructural ingresses. If in the nginx + // `dual` configuration type, this value should be the same as the // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` @@ -711,11 +714,13 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer. + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // List of challenge solvers to use instead of the default one for the `http01` - // challenge. + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` // The type of the clusterIssuer, must be `dns01` for using DNS challenge or @@ -877,10 +882,11 @@ type SpecDistributionModulesLogging struct { // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be // stored. - // - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. 
- // - `customOuputs`: the Logging Operator will be deployed and installed but with - // no local storage, you will have to create the needed Outputs and ClusterOutputs - // to ship the logs to your desired storage. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. // // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` @@ -1087,14 +1093,14 @@ type SpecDistributionModulesMonitoring struct { // // - `none`: will disable the whole monitoring stack. // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. 
// - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. // @@ -1187,7 +1193,7 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Mimir's storage. + // The external S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. @@ -1436,7 +1442,7 @@ type SpecDistributionModulesTracingTempoExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Tempo's storage. + // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. @@ -1479,12 +1485,12 @@ type SpecInfrastructureVpcNetwork struct { // Network CIDRS configuration for private and public subnets. 
type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // Network CIRDs for the private subnets, where the nodes, the pods, and the + // The network CIDRs for the private subnets, where the nodes, the pods, and the // private load balancers will be created Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - // Network CIDRs for the public subnets, where the public load balancers and the - // VPN servers will be created + // The network CIDRs for the public subnets, where the public load balancers and + // the VPN servers will be created Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } @@ -1577,9 +1583,9 @@ type SpecKubernetes struct { NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` // Accepted values are `launch_configurations`, `launch_templates` or `both`. For - // new clusters use `launch_templates`, for adopting existing cluster you'll need - // to migrate from `launch_configurations` to `launch_templates` using `both` as - // interim. + // new clusters use `launch_templates`, for adopting an existing cluster you'll + // need to migrate from `launch_configurations` to `launch_templates` using `both` + // as interim. NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` // This value defines the network CIDR that will be used to assign IP addresses to diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index b5b6d4032..56ce77f03 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -294,9 +294,9 @@ type SpecDistributionModules struct { // Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, - // Dex). 
Notice that when nginx type is dual, these will use the `external` - // ingress class. + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -351,7 +351,7 @@ type SpecDistributionModulesAuthDexExpiry struct { // Override the common configuration with a particular configuration for the Auth // module. type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". + // Override the definition of the Auth module ingresses. Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` // Set to override the node selector used to place the pods of the Auth module. @@ -370,6 +370,7 @@ type SpecDistributionModulesAuthOverridesIngress struct { IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -650,7 +651,7 @@ type SpecDistributionModulesDrVeleroSnapshotController struct { type SpecDistributionModulesIngress struct { // The base domain used for all the KFD infrastructural ingresses. If using the - // nginx dual type, this value should be the same as the domain associated with + // nginx `dual` type, this value should be the same as the domain associated with // the `internal` ingress class. 
BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` @@ -688,11 +689,13 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer. + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // List of challenge solvers to use instead of the default one for the `http01` - // challenge. + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` // The type of the clusterIssuer. Only `http01` challenge is supported for @@ -818,10 +821,11 @@ type SpecDistributionModulesLogging struct { // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be // stored. - // - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. - // - `customOuputs`: the Logging Operator will be deployed and installed but with - // no local storage, you will have to create the needed Outputs and ClusterOutputs - // to ship the logs to your desired storage. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. // // Default is `opensearch`. 
Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` @@ -1838,7 +1842,112 @@ func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJS return nil } -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ +const ( + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" +) + +// Configuration for Mimir's external storage backend. +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Mimir's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Mimir package. +type SpecDistributionModulesMonitoringMimir struct { + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. 
+ Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Mimir's external storage backend. + ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the logs stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|ยตs|ms|s|m|h|d|w|y)` where y = 365 + // days. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. 
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the `k8s` Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the `k8s` Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. 
+ // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ "none", "calico", "cilium", @@ -1864,8 +1973,68 @@ func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// Configuration for the Monitoring module. +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". 
+ Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". 
+ PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. + // + // - `none`: will disable the whole monitoring stack. + // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + + // X509Exporter corresponds to the JSON schema field "x509Exporter". + X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. @@ -2360,7 +2529,7 @@ type SpecDistributionModulesTracingTempoExternalEndpoint struct { // The bucket name of the external tempo backend BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // The external S3-compatible endpoint for Tempo's storage. 
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, the external tempo backend will not use tls diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index b3f3b16ed..6731d7450 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -298,9 +298,9 @@ type SpecDistributionModules struct { // Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, - // Dex). Notice that when nginx type is dual, these will use the `external` - // ingress class. + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -701,7 +701,7 @@ type SpecDistributionModulesDrVeleroSnapshotController struct { type SpecDistributionModulesIngress struct { // The base domain used for all the KFD infrastructural ingresses. If using the - // nginx dual type, this value should be the same as the domain associated with + // nginx `dual` type, this value should be the same as the domain associated with // the `internal` ingress class. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` @@ -745,11 +745,13 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer. + // The name of the clusterIssuer. 
Name string `json:"name" yaml:"name" mapstructure:"name"` - // List of challenge solvers to use instead of the default one for the `http01` - // challenge. + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` // The type of the clusterIssuer. Only `http01` challenge is supported for @@ -875,10 +877,11 @@ type SpecDistributionModulesLogging struct { // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be // stored. - // - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. - // - `customOuputs`: the Logging Operator will be deployed and installed but with - // no local storage, you will have to create the needed Outputs and ClusterOutputs - // to ship the logs to your desired storage. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. // // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` @@ -1085,14 +1088,14 @@ type SpecDistributionModulesMonitoring struct { // // - `none`: will disable the whole monitoring stack. 
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. // @@ -1185,7 +1188,7 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Mimir's storage. + // The external S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. 
@@ -1195,110 +1198,430 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) - } - *j = SpecDistributionModulesTracingType(v) - return nil +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") - } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngress(plain) - return nil +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. 
The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the `k8s` Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the `k8s` Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". 
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") - } - type Plain SpecDistributionModulesLoggingCustomOutputs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) - return nil +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringType 
string + +const ( + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. + // Default is `calico`. + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` +} + +type SpecDistributionModulesNetworkingCilium struct { + // The mask size to use for the Pods network on each node. + MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Allows specifing a CIDR for the Pods network different from + // `.spec.kubernetes.podCidr`. If not set the default is to use + // `.spec.kubernetes.podCidr`. + PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"` +} + +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworkingType string + +const ( + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) + +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +} + +// Configuration for the Gatekeeper package. 
+type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Kyverno package. 
+type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +} + +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +type SpecDistributionModulesPolicyType string + +const ( + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" +) + +// Configuration for the Tracing module. 
+type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. 
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingTempoBackend string + +const ( + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. 
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +type SpecDistributionModulesTracingType string + +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +// Defines the Kubernetes components configuration and the values needed for the +// kubernetes phase of furyctl. +type SpecKubernetes struct { + // Advanced corresponds to the JSON schema field "advanced". + Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` + + // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". + AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` + + // The address for the Kubernetes control plane. Usually a DNS entry pointing to a + // Load Balancer on port 6443. + ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` + + // The DNS zone of the machines. It will be appended to the name of each host to + // generate the `kubernetes_hostname` in the Ansible inventory file. It is also + // used to calculate etcd's initial cluster value. 
+ DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` + + // LoadBalancers corresponds to the JSON schema field "loadBalancers". + LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` + + // Masters corresponds to the JSON schema field "masters". + Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` + + // Nodes corresponds to the JSON schema field "nodes". + Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` + + // The path to the folder where the PKI files for Kubernetes and etcd are stored. + PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` + + // The subnet CIDR to use for the Pods network. + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` + + // Proxy corresponds to the JSON schema field "proxy". + Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". + Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The subnet CIDR to use for the Services network. + SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` +} + +type SpecKubernetesAdvanced struct { + // AirGap corresponds to the JSON schema field "airGap". + AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` + + // Cloud corresponds to the JSON schema field "cloud". + Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` + + // Containerd corresponds to the JSON schema field "containerd". + Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` + + // Encryption corresponds to the JSON schema field "encryption". 
+ Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"` + + // Oidc corresponds to the JSON schema field "oidc". + Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"` + + // URL of the registry where to pull images from for the Kubernetes phase. + // (Default is registry.sighup.io/fury/on-premises). + Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` + + // Users corresponds to the JSON schema field "users". + Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` +} + +// Advanced configuration for air-gapped installations. Allows setting custom URLs +// where to download the binaries dependencies from and custom .deb and .rpm +// package repositories. +type SpecKubernetesAdvancedAirGap struct { + // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should + // be as the one downloaded from containerd GitHub releases page. + ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` + + // DependenciesOverride corresponds to the JSON schema field + // "dependenciesOverride". + DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` + + // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded + // from + // `//etcd--linux-.tar.gz` + EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"` + + // Checksum for the runc binary. 
+ RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"` + + // URL where to download the runc binary from. + RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"` +} + +type SpecKubernetesAdvancedAirGapDependenciesOverride struct { + // Apt corresponds to the JSON schema field "apt". + Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` + + // Yum corresponds to the JSON schema field "yum". + Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` +} + +type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { + // URL where to download the GPG key of the Apt repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // The GPG key ID of the Apt repository. Example: + // `36A1D7869245C8950F966E92D8576A8BA88D21E9` + GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` + + // An indicative name for the Apt repository. Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // A source string for the new Apt repository. Example: `deb + // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` +} + +type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { + // URL where to download the ASCII-armored GPG key of the Yum repository. Example: + // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` + GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` + + // If true, the GPG signature check on the packages will be enabled. 
+ GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` + + // An indicative name for the Yum repository. Example: `k8s-1.29` + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // URL to the directory where the Yum repository's `repodata` directory lives. + // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/` + Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` + + // If true, the GPG signature check on the `repodata` will be enabled. + RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` +} + +type SpecKubernetesAdvancedAnsible struct { + // Additional configuration to append to the ansible.cfg file + Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + + // The Python interpreter to use for running Ansible. Example: python3 + PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 5e6c07b26..072f7e7ba 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -357,38 +357,11 @@ }, "operatorName": { "type": "string", - "description": "The username of the account to create in the bastion's operating system" - }, - "dhParamsBits": { - "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" - }, - "vpnClientsSubnetCidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" - }, - "ssh": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" - }, - "vpcId": { - "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" - }, - "bucketNamePrefix": { - "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" - }, - "iamUserNameOverride": { - "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." 
} - }, - "required": [ - "ssh", - "vpnClientsSubnetCidr" - ] + } }, - "Spec.Infrastructure.Vpn.Ssh": { + "Spec.Distribution.Common.Provider": { "type": "object", "additionalProperties": false, "properties": { @@ -563,50 +536,7 @@ "type": { "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. It is recommended to use `self-managed`.", "type": "string", - "enum": [ - "eks-managed", - "self-managed" - ] - }, - "name": { - "type": "string", - "description": "The name of the node pool." - }, - "ami": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" - }, - "containerRuntime": { - "type": "string", - "enum": [ - "docker", - "containerd" - ], - "description": "The container runtime to use for the nodes" - }, - "size": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" - }, - "instance": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance" - }, - "attachedTargetGroups": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.AwsArn" - }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" - }, - "labels": { - "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" - }, - "taints": { - "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" - }, - "tags": { - "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." 
}, "subnetIds": { "type": "array", @@ -1183,11 +1113,7 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" - }, - "nginx": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "certManager": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", @@ -1209,82 +1135,140 @@ "baseDomain", "nginx" ], - "allOf": [ - { - "if": { - "properties": { - "nginx": { - "properties": { - "type": { - "const": "dual" - } - } - } - } - }, - "then": { - "required": [ - "dns" - ], - "properties": { - "dns": { - "required": [ - "public", - "private" - ] - } - } - } + "then": { + "required": [ + "certManager" + ] + }, + "type": "object" + }, + "Spec.Distribution.Modules.Ingress.CertManager": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the cert-manager package. 
Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", + "properties": { + "clusterIssuer": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "clusterIssuer" + ] + }, + "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { + "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", + "oneOf": [ { - "if": { - "properties": { - "nginx": { - "properties": { - "type": { - "const": "single" - } - } - } - } - }, - "then": { - "required": [ - "dns" - ], - "properties": { - "dns": { - "required": [ - "public" - ] - } - } - } + "required": [ + "type" + ] }, { - "if": { - "properties": { - "nginx": { - "properties": { - "tls": { - "properties": { - "provider": { - "const": "certManager" - } - } - } - } - } - } - }, - "then": { - "required": [ - "certManager" - ] - } + "required": [ + "solvers" + ] } + ], + "properties": { + "email": { + "type": "string", + "format": "email", + "description": "The email address to use during the certificate issuing process." + }, + "name": { + "type": "string", + "description": "The name of the clusterIssuer." + }, + "route53": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" + }, + "solvers": { + "type": "array", + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." 
+ }, + "type": { + "type": "string", + "enum": [ + "dns01", + "http01" + ], + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." + } + }, + "required": [ + "route53", + "name", + "email" + ], + "type": "object" + }, + "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": { + "type": "object", + "additionalProperties": false, + "properties": { + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" + }, + "region": { + "$ref": "#/$defs/Types.AwsRegion" + }, + "hostedZoneId": { + "type": "string" + } + }, + "required": [ + "hostedZoneId", + "iamRoleArn", + "region" ] }, + "Spec.Distribution.Modules.Ingress.DNS": { + "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", + "additionalProperties": false, + "properties": { + "public": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public" + }, + "private": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + }, + "required": [ + "public", + "private" + ] + }, + "Spec.Distribution.Modules.Ingress.DNS.Private": { + "additionalProperties": false, + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", + "properties": { + "create": { + "type": "boolean", + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." + }, + "name": { + "type": "string", + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." 
+ }, + "vpcId": { + "type": "string" + } + }, + "required": [ + "vpcId", + "name", + "create" + ], + "type": "object" + }, "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, @@ -1539,7 +1523,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." 
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1797,7 +1781,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1967,7 +1951,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Mimir's storage." + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", @@ -2320,50 +2304,149 @@ "additionalProperties": false, "description": "Configuration for Velero's backup schedules.", "properties": { - "install": { + "username": { + "type": "string", + "description": "The username for the default MinIO root user." + }, + "password": { + "type": "string", + "description": "The password for the default MinIO root user." + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Distribution.Modules.Tracing.Tempo": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for the Tempo package.", + "properties": { + "retentionTime": { + "type": "string", + "description": "The retention time for the traces stored in Tempo." + }, + "backend": { + "type": "string", + "enum": [ + "minio", + "externalEndpoint" + ], + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." + }, + "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", + "type": "object", + "additionalProperties": false, + "properties": { + "endpoint": { + "type": "string", + "description": "The external S3-compatible endpoint for Tempo's storage." 
+ }, + "insecure": { "type": "boolean", - "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." + "description": "If true, will use HTTP as protocol instead of HTTPS." + }, + "secretAccessKey": { + "type": "string", + "description": "The secret access key (password) for the external S3-compatible bucket." + }, + "accessKeyId": { + "type": "string", + "description": "The access key ID (username) for the external S3-compatible bucket." }, - "definitions": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for Velero schedules.", - "properties": { - "manifests": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for Velero's manifests backup schedule.", + "bucketName": { + "type": "string", + "description": "The bucket name of the external S3-compatible object storage." + } + } + }, + "overrides": { + "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + } + } + }, + "Spec.Infrastructure": { + "type": "object", + "additionalProperties": false, + "properties": { + "vpc": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc" + }, + "vpn": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn" + } + }, + "allOf": [ + { + "if": { + "allOf": [ + { + "properties": { + "vpc": { + "type": "null" + } + } + }, + { + "not": { + "properties": { + "vpn": { + "type": "null" + } + } + } + } + ] + }, + "then": { + "properties": { + "vpn": { + "required": [ + "vpcId" + ] + } + } + } + }, + { + "if": { + "allOf": [ + { + "not": { "properties": { - "schedule": { - "type": "string", - "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." 
+ "vpc": { + "type": "null" } } - }, - "full": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for Velero's manifests backup schedule.", + } + }, + { + "not": { "properties": { - "schedule": { - "type": "string", - "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." - }, - "ttl": { - "type": "string", - "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." - }, - "snapshotMoveData": { - "type": "boolean", - "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." 
+ "vpn": { + "properties": { + "vpcId": { + "type": "null" + } + } } } } } + ] + }, + "then": { + "properties": { + "vpn": { + "properties": { + "vpcId": { + "type": "null" + } + } + } } } }, @@ -2386,13 +2469,33 @@ "$ref": "#/$defs/Types.AwsRegion", "description": "The region where the velero bucket is located" }, - "bucketName": { - "$ref": "#/$defs/Types.AwsS3BucketName", - "maxLength": 49, - "description": "The name of the velero bucket" + "subnetsCidrs": { + "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" + } + }, + "required": [ + "cidr", + "subnetsCidrs" + ] + }, + "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { + "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", + "additionalProperties": false, + "properties": { + "private": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" + "public": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.Cidr" + }, + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ @@ -2439,22 +2542,113 @@ } } }, - "then": { - "required": [ - "dex", - "pomerium", - "baseDomain" - ] + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." + } + }, + "required": [ + "allowedFromCidrs", + "githubUsersName" + ] + }, + "Spec.Kubernetes": { + "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", + "additionalProperties": false, + "properties": { + "vpcId": { + "$ref": "#/$defs/Types.AwsVpcId", + "description": "Required only if `.spec.infrastructure.vpc` is omitted. 
This value defines the ID of the VPC where the EKS cluster and its related resources will be created." + }, + "clusterIAMRoleNamePrefixOverride": { + "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." + }, + "workersIAMRoleNamePrefixOverride": { + "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." + }, + "subnetIds": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsSubnetId" }, - "else": { - "properties": { - "dex": { - "type": "null" - }, - "pomerium": { - "type": "null" - } + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created." + }, + "apiServer": { + "$ref": "#/$defs/Spec.Kubernetes.APIServer" + }, + "serviceIpV4Cidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." + }, + "nodeAllowedSshPublicKey": { + "anyOf": [ + { + "$ref": "#/$defs/Types.AwsSshPubKey" + }, + { + "$ref": "#/$defs/Types.FileRef" } + ], + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file." + }, + "nodePoolsLaunchKind": { + "type": "string", + "enum": [ + "launch_configurations", + "launch_templates", + "both" + ], + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." 
+ }, + "logRetentionDays": { + "type": "integer", + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] + }, + "logsTypes": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler" + ] + }, + "minItems": 0, + "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types." + }, + "nodePools": { + "type": "array", + "items": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool" } }, { From b0eb79cce12c4e985fbfd3a90374fe3084cb894d Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 16:54:12 +0100 Subject: [PATCH 120/160] chore: generate go-models and docs --- .../ekscluster/v1alpha2/private/schema.go | 4720 ++++++++--------- pkg/apis/ekscluster/v1alpha2/public/schema.go | 2061 +++---- .../kfddistribution/v1alpha2/public/schema.go | 1462 +++-- pkg/apis/onpremises/v1alpha2/public/schema.go | 530 +- schemas/private/ekscluster-kfd-v1alpha2.json | 920 ++-- 5 files changed, 4248 insertions(+), 5445 deletions(-) diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 8fa711d7c..ddcb3ac0b 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -10,68 +10,173 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service -type EksclusterKfdV1Alpha2 struct { - // ApiVersion corresponds to the JSON schema field "apiVersion". - ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} - // Kind corresponds to the JSON schema field "kind". - Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} - // Metadata corresponds to the JSON schema field "metadata". - Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` +type TypesKubeNodeSelector map[string]string - // Spec corresponds to the JSON schema field "spec". - Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` +type SpecDistributionCommonProvider struct { + // The provider type. Don't set. FOR INTERNAL USE ONLY. + Type string `json:"type" yaml:"type" mapstructure:"type"` } -type EksclusterKfdV1Alpha2Kind string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} -const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" +type TypesKubeTolerationEffect string -type Metadata struct { - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } -type Spec struct { - // Distribution corresponds to the JSON schema field "distribution". - Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} - // DistributionVersion corresponds to the JSON schema field "distributionVersion". 
- DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` +const ( + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" +) - // Infrastructure corresponds to the JSON schema field "infrastructure". - Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` +type TypesKubeTolerationOperator string - // Kubernetes corresponds to the JSON schema field "kubernetes". - Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} - // Plugins corresponds to the JSON schema field "plugins". - Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} - // Region corresponds to the JSON schema field "region". - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +const ( + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" +) - // This map defines which will be the common tags that will be added to all the - // resources created on AWS. 
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". - ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` -} + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` -type SpecDistribution struct { - // Common corresponds to the JSON schema field "common". - Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - // CustomPatches corresponds to the JSON schema field "customPatches". - CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} - // Modules corresponds to the JSON schema field "modules". - Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { // The node selector to use to place the pods for all the KFD modules. Follows // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. @@ -101,12 +206,55 @@ type SpecDistributionCommon struct { Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified - Type string `json:"type" yaml:"type" mapstructure:"type"` +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } -type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" +) + +type TypesKubeLabels map[string]string + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { + // The annotations of the configmap + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the configmap will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the configmap + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { // The behavior of the configmap @@ -321,47 +469,19 @@ 
type SpecDistributionModulesAuthDexExpiry struct { // Dex ID tokens expiration time duration (default 24h). IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` - // Dr corresponds to the JSON schema field "dr". - Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` - - // Ingress corresponds to the JSON schema field "ingress". - Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` - - // Logging corresponds to the JSON schema field "logging". - Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` - - // Monitoring corresponds to the JSON schema field "monitoring". - Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` - - // Networking corresponds to the JSON schema field "networking". - Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + // Dex signing key expiration time duration (default 6h). + SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` +} - // Policy corresponds to the JSON schema field "policy". - Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // Tracing corresponds to the JSON schema field "tracing". 
- Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` -} - -type SpecDistributionModulesAuth struct { - // The base domain for the ingresses created by the Auth module (Gangplank, - // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will - // use the `external` ingress class. - BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` - - // Dex corresponds to the JSON schema field "dex". - Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Pomerium corresponds to the JSON schema field "pomerium". - Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` - - // Provider corresponds to the JSON schema field "provider". - Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +// Configuration for the Dex package. type SpecDistributionModulesAuthDex struct { // Additional static clients defitions that will be added to the default clients // included with the distribution in Dex's configuration. 
Example: @@ -389,23 +509,22 @@ type SpecDistributionModulesAuthDex struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesAuthDexExpiry struct { - // Dex ID tokens expiration time duration (default 24h). - IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` - - // Dex signing key expiration time duration (default 6h). - SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` -} - -type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the auth module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the auth module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil } type SpecDistributionModulesAuthOverridesIngress struct { @@ -416,6 +535,28 @@ type SpecDistributionModulesAuthOverridesIngress struct { IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress // Override the common configuration with a particular configuration for the Auth @@ -533,9 +674,9 @@ func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { // Configuration for the Auth module. 
type SpecDistributionModulesAuth struct { - // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, - // Dex). Notice that when nginx type is dual, these will use the `external` - // ingress class. + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -674,25 +815,27 @@ type TypesFuryModuleOverridesIngress struct { // `.spec.modules.auth.provider.type` is SSO or Basic Auth. DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** - Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } -type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth - Password string `json:"password" yaml:"password" mapstructure:"password"` +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - // The username for the basic auth - Username string `json:"username" yaml:"username" mapstructure:"username"` -} +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` -type SpecDistributionModulesAuthProviderType string + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` -const ( - SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" - SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" - SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" -) + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} type SpecDistributionModulesAws struct { // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". @@ -713,48 +856,60 @@ type SpecDistributionModulesAws struct { Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"` } -type SpecDistributionModulesAwsClusterAutoscaler struct { - // IamRoleArn corresponds to the JSON schema field "iamRoleArn". - IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesAwsEbsCsiDriver struct { - // IamRoleArn corresponds to the JSON schema field "iamRoleArn". - IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesAwsEbsSnapshotController struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { + return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") + } + if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { + return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") + } + if v, ok := raw["loadBalancerController"]; !ok || v == nil { + return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") + } + if v, ok := raw["overrides"]; !ok || v == nil { + return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + } + type Plain SpecDistributionModulesAws + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAws(plain) + return nil } -type SpecDistributionModulesAwsLoadBalancerController struct { - // IamRoleArn corresponds to the JSON schema field "iamRoleArn". - IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` +type SpecDistributionModulesDrType string - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", } -type SpecDistributionModulesDr struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of the DR, must be ***none*** or ***eks*** - Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` - - // Velero corresponds to the JSON schema field "velero". - Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil } -type SpecDistributionModulesDrType string - const ( SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" @@ -865,23 +1020,28 @@ type SpecDistributionModulesDrVeleroEks struct { Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } -// Configuration for Velero's backup schedules. -type SpecDistributionModulesDrVeleroSchedules struct { - // Configuration for Velero schedules. 
- Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` - - // Whether to install or not the default `manifests` and `full` backups schedules. - // Default is `true`. - Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` -} - -// Configuration for Velero schedules. -type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { - // Configuration for Velero's manifests backup schedule. - Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - - // Configuration for Velero's manifests backup schedule. - Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVeleroEks(plain) + return nil } // Configuration for Velero's manifests backup schedule. 
@@ -915,57 +1075,121 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -type SpecDistributionModulesIngress struct { - // The base domain used for all the KFD infrastructural ingresses. If in the nginx - // `dual` configuration type, this value should be the same as the - // `.spec.distribution.modules.ingress.dns.private.name` zone. - BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - - // CertManager corresponds to the JSON schema field "certManager". - CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` - - // Dns corresponds to the JSON schema field "dns". - Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` - - // ExternalDns corresponds to the JSON schema field "externalDns". - ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"` +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - // Forecastle corresponds to the JSON schema field "forecastle". - Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + // Configuration for Velero's manifests backup schedule. 
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} - // Configurations for the nginx ingress controller module - Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` +// Configuration for Velero's backup schedules. +type SpecDistributionModulesDrVeleroSchedules struct { + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // Whether to install or not the default `manifests` and `full` backups schedules. + // Default is `true`. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` } -type SpecDistributionModulesIngressCertManager struct { - // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". - ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` +type SpecDistributionModulesDrVelero struct { + // Eks corresponds to the JSON schema field "eks". + Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer - Email string `json:"email" yaml:"email" mapstructure:"email"` - - // The name of the clusterIssuer. 
- Name string `json:"name" yaml:"name" mapstructure:"name"` - // Route53 corresponds to the JSON schema field "route53". - Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` + // Configuration for Velero's backup schedules. + Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +} - // The list of challenge solvers to use instead of the default one for the - // `http01` challenge. Check [cert manager's - // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) - // for examples for this field. - Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + } + type Plain SpecDistributionModulesDrVelero + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVelero(plain) + return nil +} - // The type of the cluster issuer, must be ***dns01*** or ***http01*** - Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +// Configuration for the Disaster Recovery module. +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of the Disaster Recovery, must be `none` or `eks`. 
`none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` + + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} + +type SpecDistributionModulesIngressClusterIssuerRoute53 struct { + // HostedZoneId corresponds to the JSON schema field "hostedZoneId". + HostedZoneId string `json:"hostedZoneId" yaml:"hostedZoneId" mapstructure:"hostedZoneId"` + + // IamRoleArn corresponds to the JSON schema field "iamRoleArn". + IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + + // Region corresponds to the JSON schema field "region". + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["hostedZoneId"]; !ok || v == nil { + return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) + return nil } type SpecDistributionModulesIngressCertManagerClusterIssuerType string @@ -1006,30 +1230,79 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer. + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` // Route53 corresponds to the JSON schema field "route53". Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` - // IamRoleArn corresponds to the JSON schema field "iamRoleArn". - IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. 
+ Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // Region corresponds to the JSON schema field "region". - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. + Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionModulesIngressDNS struct { +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["route53"]; !ok || v == nil { + return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil +} + +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. +type SpecDistributionModulesIngressCertManager struct { + // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". 
+ ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` + // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} - // Private corresponds to the JSON schema field "private". - Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` - - // Public corresponds to the JSON schema field "public". - Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { // By default, a Terraform data source will be used to get the private DNS zone. // Set to `true` to create the private zone instead. 
@@ -1143,29 +1416,34 @@ type SpecDistributionModulesIngressForecastle struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesIngressNginx struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tls corresponds to the JSON schema field "tls". - Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` +type SpecDistributionModulesIngressNginxTLSProvider string - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** - Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } -type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** - Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` - - // Secret corresponds to the JSON schema field "secret". - Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil } -type SpecDistributionModulesIngressNginxTLSProvider string - const ( SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" @@ -1269,17 +1547,47 @@ func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error const ( SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" ) -type SpecDistributionModulesIngressOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The node selector to use to place the pods for the ingress module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The tolerations that will be added to the pods for the ingress module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. + // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. KFD infrastructural + // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. + Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil } type SpecDistributionModulesIngressOverridesIngresses struct { @@ -1287,9 +1595,11 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } -type SpecDistributionModulesLogging struct { - // Cerebro corresponds to the JSON schema field "cerebro". - Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` +// Override the common configuration with a particular configuration for the +// Ingress module. +type SpecDistributionModulesIngressOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` @@ -1300,8 +1610,8 @@ type SpecDistributionModulesLogging struct { } type SpecDistributionModulesIngress struct { - // The base domain used for all the KFD ingresses. If in the nginx `dual` - // configuration type, this value should be the same as the + // The base domain used for all the KFD infrastructural ingresses. 
If in the nginx + // `dual` configuration type, this value should be the same as the // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` @@ -1324,46 +1634,60 @@ type SpecDistributionModulesIngress struct { Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an - // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. - // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for - // storage. - // - `customOuputs`: the Logging Operator will be deployed and installed but - // without in-cluster storage, you will have to create the needed Outputs and - // ClusterOutputs to ship the logs to your desired storage. - // - // Default is `opensearch`. - Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` -} - -// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. -type SpecDistributionModulesLoggingCerebro struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// When using the `customOutputs` logging type, you need to manually specify the -// spec of the several `Output` and `ClusterOutputs` that the Logging Operator -// expects to forward the logs collected by the pre-defined flows. 
-type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from the `audit` Flow will be sent. This - // will be the `spec` section of the `Output` object. It must be a string (and not - // a YAML object) following the OutputSpec definition. Use the `nullout` output to - // discard the flow: `nullout: {}` - Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - - // This value defines where the output from the `errors` Flow will be sent. This - // will be the `spec` section of the `Output` object. It must be a string (and not - // a YAML object) following the OutputSpec definition. Use the `nullout` output to - // discard the flow: `nullout: {}` - Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - - // This value defines where the output from the `events` Flow will be sent. This - // will be the `spec` section of the `Output` object. It must be a string (and not +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["certManager"]; !ok || v == nil { + return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") + } + if v, ok := raw["externalDns"]; !ok || v == nil { + return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil +} + +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. 
+type SpecDistributionModulesLoggingCerebro struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. +type SpecDistributionModulesLoggingCustomOutputs struct { + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` + + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` + + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not // a YAML object) following the OutputSpec definition. Use the `nullout` output to // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` @@ -1399,30 +1723,71 @@ type SpecDistributionModulesLoggingCustomOutputs struct { SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } -type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". 
- Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = 
SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +type SpecDistributionModulesLoggingLokiBackend string - // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the - // time series database from BoltDB to TSDB and the schema from v11 to v13 that it - // uses to store the logs. - // - // The value of this field will determine the date when Loki will start writing - // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB - // and schema will be kept until they expire for reading purposes. - // - // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: - // `2024-11-18`. - TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } -type SpecDistributionModulesLoggingLokiBackend string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil +} const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" @@ -1447,6 +1812,83 @@ type SpecDistributionModulesLoggingLokiExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +// Configuration for the Loki package. 
+type SpecDistributionModulesLoggingLoki struct { + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Loki's external storage backend. + ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + } + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingLoki(plain) + return nil +} + +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1458,14 +1900,38 @@ type SpecDistributionModulesLoggingMinio struct { StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` +type SpecDistributionModulesLoggingOpensearchType string - // The username of the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil } +const ( + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" +) + type SpecDistributionModulesLoggingOpensearch struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1481,13 +1947,25 @@ type SpecDistributionModulesLoggingOpensearch struct { Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } -type SpecDistributionModulesLoggingOpensearchType string - -const ( - SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" - SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" -) +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil +} +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1510,11 +1988,11 @@ const ( SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" ) -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - +// Configuration for the Logging module. +type SpecDistributionModulesLogging struct { + // Cerebro corresponds to the JSON schema field "cerebro". + Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` + // CustomOutputs corresponds to the JSON schema field "customOutputs". CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` @@ -1533,34 +2011,37 @@ type SpecDistributionModulesMonitoring struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. - // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: will install Prometheus operator, an instance of - // Prometheus in Agent mode (no alerting, no queries, no storage), and all the - // exporters needed to get metrics for the status of the cluster and the - // workloads. Useful when having a centralized (remote) Prometheus where to ship - // the metrics and not storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir - // that allows for longer retention of metrics and the usage of Object Storage. + // Selects the logging stack. Options are: + // - `none`: will disable the centralized logging. + // - `opensearch`: will deploy and configure the Logging Operator and an + // OpenSearch cluster (can be single or triple for HA) where the logs will be + // stored. 
+ // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. // - // Default is `prometheus`. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + // Default is `opensearch`. + Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` +} - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + } + type Plain SpecDistributionModulesLogging + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLogging(plain) + return nil } type SpecDistributionModulesMonitoringAlertManager struct { @@ -1611,21 +2092,32 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
- ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type SpecDistributionModulesMonitoringMimirBackend string - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", } -type SpecDistributionModulesMonitoringMimirBackend string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + } + *j = SpecDistributionModulesMonitoringMimirBackend(v) + return nil +} const ( SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" @@ -1650,15 +2142,23 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -type SpecDistributionModulesMonitoringMinio struct { +// Configuration for the Mimir package. +type SpecDistributionModulesMonitoringMimir struct { + // The storage backend type for Mimir. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Mimir's external storage backend. + ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + // The retention time for the logs stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|ยตs|ms|s|m|h|d|w|y)` where y = 365 + // days. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { @@ -1787,7 +2287,26 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesNetworking struct { +// Configuration for the Monitoring module. +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". 
+ Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1802,14 +2321,14 @@ type SpecDistributionModulesNetworking struct { // // - `none`: will disable the whole monitoring stack. // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. 
- // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. // @@ -1845,23 +2364,62 @@ type SpecDistributionModulesNetworkingTigeraOperator struct { type SpecDistributionModulesNetworkingType string -const SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", +} - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` +const ( + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" +) +// Configuration for the Networking module. 
+type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // Type corresponds to the JSON schema field "type". + Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + } + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + return nil } +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Gatekeeper package. type SpecDistributionModulesPolicyGatekeeper struct { // This parameter adds namespaces to Gatekeeper's exemption list, so it will not // enforce the constraints on them. @@ -2006,12 +2564,15 @@ const ( SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" ) -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` + + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -2045,8 +2606,8 @@ type SpecDistributionModulesTracingMinioRootUser struct { // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } // Configuration for Tracing's MinIO deployment. 
@@ -2061,30 +2622,33 @@ type SpecDistributionModulesTracingMinio struct { StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` +type SpecDistributionModulesTracingTempoBackend string - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil } -type SpecDistributionModulesTracingTempoBackend string - const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" @@ -2108,2380 +2672,1345 @@ type SpecDistributionModulesTracingTempoExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + type SpecDistributionModulesTracingType string +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil +} + const ( SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" ) -type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS - Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - // This section defines the creation of VPN bastions - Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` -} - -type SpecInfrastructureVpc struct { - // Network corresponds to the JSON schema field "network". 
- Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` -} - -type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created - Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` - - // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". - SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` -} - -type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // The network CIDRs for the private subnets, where the nodes, the pods, and the - // private load balancers will be created - Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - - // The network CIDRs for the public subnets, where the public load balancers and - // the VPN servers will be created - Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` -} - -type SpecInfrastructureVpn struct { - // This value defines the prefix that will be used to create the bucket name where - // the VPN servers will store the states - BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` - - // The dhParamsBits size used for the creation of the .pem file that will be used - // in the dh openvpn server.conf file - DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` - - // The size of the disk in GB - DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` - - // Overrides the default IAM user name for the VPN - IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` - - // The size of the AWS EC2 instance - InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" 
mapstructure:"instanceType,omitempty"` - - // The number of instances to create, 0 to skip the creation - Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` - - // The username of the account to create in the bastion's operating system - OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` - - // The port used by the OpenVPN server - Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // Ssh corresponds to the JSON schema field "ssh". - Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - - // The VPC ID where the VPN servers will be created, required only if - // .spec.infrastructure.vpc is omitted - VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - - // The CIDR that will be used to assign IP addresses to the VPN clients when - // connected - VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` -} - -type SpecInfrastructureVpnSsh struct { - // The CIDR enabled in the security group that can access the bastions in SSH - AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` - - // The github user name list that will be used to get the ssh public key that will - // be added as authorized key to the operatorName user - GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` - - // This value defines the public keys that will be added to the bastion's - // operating system NOTES: Not yet implemented - PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` -} - -type SpecKubernetes struct { - // ApiServer corresponds to the JSON schema field "apiServer". 
- ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` - - // AwsAuth corresponds to the JSON schema field "awsAuth". - AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` - - // Overrides the default IAM role name prefix for the EKS cluster - ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` - - // Optional Kubernetes Cluster log retention in days. Defaults to 90 days. - LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` - - // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. - LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` - - // This key contains the ssh public key that can connect to the nodes via SSH - // using the ec2-user user - NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` - - // Global default AMI type used for EKS worker nodes. This will apply to all node - // pools unless overridden by a specific node pool. - NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` - - // Accepted values are `launch_configurations`, `launch_templates` or `both`. For - // new clusters use `launch_templates`, for adopting an existing cluster you'll - // need to migrate from `launch_configurations` to `launch_templates` using `both` - // as interim. 
- NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` - - // This value defines the CIDR that will be used to assign IP addresses to the - // services - ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` - - // This value defines the subnet IDs where the EKS cluster will be created, - // required only if .spec.infrastructure.vpc is omitted - SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - - // This value defines the VPC ID where the EKS cluster will be created, required - // only if .spec.infrastructure.vpc is omitted - VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - - // Overrides the default IAM role name prefix for the EKS workers - WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` -} - -type SpecKubernetesAPIServer struct { - // This value defines if the API server will be accessible only from the private - // subnets - PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` - - // This value defines the CIDRs that will be allowed to access the API server from - // the private subnets - PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` - - // This value defines if the API server will be accessible from the public subnets - PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` - - // This value defines the CIDRs that will be allowed to access the API server from - // the public subnets - PublicAccessCidrs []TypesCidr 
`json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` -} - -type SpecKubernetesAwsAuth struct { - // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap - AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` - - // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap - Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` - - // This optional array defines additional IAM users that will be added to the - // aws-auth configmap - Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` -} - -type SpecKubernetesAwsAuthRole struct { - // Groups corresponds to the JSON schema field "groups". - Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` - - // Rolearn corresponds to the JSON schema field "rolearn". - Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` - - // Username corresponds to the JSON schema field "username". - Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesAwsAuthUser struct { - // Groups corresponds to the JSON schema field "groups". - Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` - - // Userarn corresponds to the JSON schema field "userarn". - Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` - - // Username corresponds to the JSON schema field "username". 
- Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesLogsTypesElem string - -const ( - SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" - SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" - SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" - SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" - SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" -) - -type SpecKubernetesNodePool struct { - // AdditionalFirewallRules corresponds to the JSON schema field - // "additionalFirewallRules". - AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` - - // Ami corresponds to the JSON schema field "ami". - Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` - - // This optional array defines additional target groups to attach to the instances - // in the node pool - AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - - // The container runtime to use for the nodes - ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` - - // Instance corresponds to the JSON schema field "instance". - Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - - // Kubernetes labels that will be added to the nodes - Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - - // The name of the node pool. 
- Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Size corresponds to the JSON schema field "size". - Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - - // This value defines the subnet IDs where the nodes will be created - SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - - // AWS tags that will be added to the ASG and EC2 instances - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // Kubernetes taints that will be added to the nodes - Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - - // The type of Node Pool, can be `self-managed` for using customization like - // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from - // Amazon via the `ami.type` field. It is recommended to use `self-managed`. - Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecInfrastructureVpcNetwork struct { - // The network CIDR for the VPC that will be created - Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` - - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Tags corresponds to the JSON schema field "tags". - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // Type corresponds to the JSON schema field "type". 
- Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { - // From corresponds to the JSON schema field "from". - From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` - - // To corresponds to the JSON schema field "to". - To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // The protocol of the FW rule - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // If true, the source will be the security group itself - Self bool `json:"self" yaml:"self" mapstructure:"self"` - - // The tags of the FW rule - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the FW rule can be ingress or egress - Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" -) - -type 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // The protocol of the FW rule - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // The source security group ID - SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - - // The tags of the FW rule - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the FW rule can be ingress or egress - Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. - CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` - - // Self corresponds to the JSON schema field "self". - Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` - - // SourceSecurityGroupId corresponds to the JSON schema field - // "sourceSecurityGroupId". 
- SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` -} - -// Configuration for customize the Amazon Machine Image (AMI) for the machines of -// the Node Pool. -// -// The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields -// for using a custom AMI (just with `self-managed` node pool type) or by setting -// the `ami.type` field to one of the official AMIs based on Amazon Linux. -type SpecKubernetesNodePoolAmi struct { - // The ID of the AMI to use for the nodes, must be set toghether with the `owner` - // field. `ami.id` and `ami.owner` can be only set when Node Pool type is - // `self-managed` and they can't be set at the same time than `ami.type`. - Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` - - // The owner of the AMI to use for the nodes, must be set toghether with the `id` - // field. `ami.id` and `ami.owner` can be only set when Node Pool type is - // `self-managed` and they can't be set at the same time than `ami.type`. - Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` - - // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type - // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at - // the same time than `ami.id` and `ami.owner`. 
- Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` -} - -type SpecKubernetesNodePoolAmiType string - -const ( - SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" - SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" -) - -type SpecKubernetesNodePoolContainerRuntime string - -const ( - SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" - SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" -) - -type SpecKubernetesNodePoolGlobalAmiType string - -const ( - SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" - SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" -) - -type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". - MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - - // If true, the nodes will be created as spot instances - Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - - // The instance type to use for the nodes - Type string `json:"type" yaml:"type" mapstructure:"type"` - - // The size of the disk in GB - VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - - // VolumeType corresponds to the JSON schema field "volumeType". 
- VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` -} - -type SpecKubernetesNodePoolInstanceVolumeType string - -const ( - SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" - SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" - SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" - SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" -) - -type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool - Max int `json:"max" yaml:"max" mapstructure:"max"` - - // The minimum number of nodes in the node pool - Min int `json:"min" yaml:"min" mapstructure:"min"` -} - -type SpecKubernetesNodePoolType string - -const ( - SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" - SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" -) - -type SpecKubernetesNodePoolsLaunchKind string - -const ( - SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" - SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" - SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // The username of the account to create in the bastion's operating system. - OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` - - // The port where each OpenVPN server will listen for connections. 
- Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // Disable running `helm diff` validation when installing the plugin, it will - // still be done when upgrading. - DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". - Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The network CIDR that will be used to assign IP addresses to the VPN clients - // when connected. - VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") - } - type Plain SpecInfrastructureVpn - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpn(plain) - return nil -} - -type SpecToolsConfigurationTerraform struct { - // State corresponds to the JSON schema field "state". - State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` -} - -type SpecToolsConfigurationTerraformState struct { - // S3 corresponds to the JSON schema field "s3". - S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` -} - -type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states - BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - - // This value defines which folder will be used to store all the states inside the - // bucket - KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - - // This value defines in which region the bucket is located - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` - - // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region - SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` -} - -type TypesAwsArn string - -type TypesAwsIamRoleName string - -type TypesAwsIamRoleNamePrefix string - -type TypesAwsIpProtocol 
string - -type TypesAwsRegion string - -const ( - TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" - TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" - TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" - TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" - TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" - TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" - TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" - TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" - TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" - TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" - TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" - TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" - TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" - TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" - TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" - TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" - TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" - TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" - TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" -) - -type TypesAwsS3BucketName string - -type TypesAwsS3BucketNamePrefix string - -type TypesAwsS3KeyPrefix string - -type TypesAwsSshPubKey string - -type TypesAwsSubnetId string - -type TypesAwsTags map[string]string - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") - } - type Plain SpecDistributionModulesIngressDNSPublic - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressDNSPublic(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) - } - *j = TypesAwsRegion(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") - } - type Plain SpecDistributionModulesTracing - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesTracing(plain) - return nil -} - -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") - } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") - } - type Plain SpecDistributionModules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModules(plain) - return nil -} - -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") - } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistribution(plain) - return nil -} - -type TypesCidr string - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) - } - *j = SpecDistributionModulesTracingTempoBackend(v) - return nil -} - -const ( - SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" - SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" -) - -// Port range for the Firewall Rule. -type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { - // From corresponds to the JSON schema field "from". - From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` - - // To corresponds to the JSON schema field "to". - To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) - return nil -} - -type TypesAwsIpProtocol string - -type TypesAwsTags map[string]string - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) - } - *j = SpecDistributionModulesDrType(v) - return nil -} - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { - // CidrBlocks corresponds to the JSON schema field "cidrBlocks". 
- CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` - - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Additional AWS tags for the Firewall rule. - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the Firewall rule, can be `ingress` for incoming traffic or - // `egress` for outgoing traffic. - Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") - } - type Plain SpecInfrastructureVpcNetwork - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpcNetwork(plain) - return nil -} - -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") - } - type Plain SpecInfrastructureVpc - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpc(plain) - return nil -} - -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} - -type TypesTcpPort int - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { - return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") - } - if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { - return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") - } - if v, ok := raw["loadBalancerController"]; !ok || v == nil { - return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") - } - if v, ok := raw["overrides"]; !ok || v == nil { - return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") - } - type Plain SpecDistributionModulesAws - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAws(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") - } - type Plain SpecInfrastructureVpnSsh - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) - return nil -} - -type TypesAwsVpcId string - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") - } - type Plain SpecInfrastructureVpn - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpn(plain) - return nil -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") - } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") - } - type Plain SpecKubernetesAPIServer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAPIServer(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") - } - type Plain SpecDistributionModulesAwsLoadBalancerController - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAwsLoadBalancerController(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") - } - type Plain SpecKubernetesAwsAuthRole - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAwsAuthRole(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") - } - type Plain SpecDistributionModulesAwsEbsCsiDriver - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAwsEbsCsiDriver(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") - } - type Plain SpecKubernetesAwsAuthUser - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAwsAuthUser(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") - } - type Plain SpecDistributionModulesAwsClusterAutoscaler - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAwsClusterAutoscaler(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") - } - type Plain SpecDistributionModulesPolicy - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicy(plain) - return nil -} - -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) - } - *j = SpecKubernetesLogsTypesElem(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) - } - *j = SpecDistributionModulesPolicyType(v) - return nil -} - -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") - } - type Plain SpecDistributionModulesAuthProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProvider(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil -} - -var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ - "alinux2", - "alinux2023", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) - } - *j = SpecKubernetesNodePoolGlobalAmiType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil -} - -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} - -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") - } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyGatekeeper(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) - return nil -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthOverridesIngress(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistributionModulesTracing(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", -} +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) - return nil + // Aws corresponds to the JSON schema field "aws". + Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` + + // Dr corresponds to the JSON schema field "dr". 
+ Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` + + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". + Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". + Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionModules(plain) return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", +type SpecDistribution struct { + // Common corresponds to the JSON schema field "common". + Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + + // CustomPatches corresponds to the JSON schema field "customPatches". + CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + + // Modules corresponds to the JSON schema field "modules". + Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecDistribution(plain) return nil } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} +type TypesCidr string -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", +// Network CIDRS configuration for private and public subnets. 
+type SpecInfrastructureVpcNetworkSubnetsCidrs struct { + // The network CIDRs for the private subnets, where the nodes, the pods, and the + // private load balancers will be created + Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` + + // The network CIDRs for the public subnets, where the public load balancers and + // the VPN servers will be created + Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) - } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", +type SpecInfrastructureVpcNetwork struct { + // The network CIDR for the VPC that will be created + Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` + + // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". + SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesMonitoring + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. +type SpecInfrastructureVpc struct { + // Network corresponds to the JSON schema field "network". + Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecInfrastructureVpc(plain) return nil } +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + +type SpecInfrastructureVpnSsh struct { + // The network CIDR enabled in the security group to access the VPN servers + // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. 
+	AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"`
+
+	// List of GitHub usernames from whom to get their SSH public key and add as
+	// authorized keys of the `operatorName` user.
+	GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"`
+
+	// **NOT IN USE**, use `githubUsersName` instead. This value defines the public
+	// keys that will be added to the bastion's operating system.
+	PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error {
+func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error {
 	var raw map[string]interface{}
 	if err := json.Unmarshal(b, &raw); err != nil {
 		return err
 	}
-	if v, ok := raw["bucketName"]; !ok || v == nil {
-		return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required")
-	}
-	if v, ok := raw["iamRoleArn"]; !ok || v == nil {
-		return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required")
+	if v, ok := raw["allowedFromCidrs"]; !ok || v == nil {
+		return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required")
 	}
-	if v, ok := raw["region"]; !ok || v == nil {
-		return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required")
+	if v, ok := raw["githubUsersName"]; !ok || v == nil {
+		return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required")
 	}
-	type Plain SpecDistributionModulesDrVeleroEks
+	type Plain SpecInfrastructureVpnSsh
 	var plain Plain
 	if err := json.Unmarshal(b, &plain); err != nil {
 		return err
 	}
-	*j = SpecDistributionModulesDrVeleroEks(plain)
+	if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 {
+		return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1)
+	}
+	*j = SpecInfrastructureVpnSsh(plain)
 	return nil
 }
 
+type TypesAwsVpcId string
+
+// Configuration for the VPN server instances.
+type SpecInfrastructureVpn struct {
+	// This value defines the prefix for the bucket name where the VPN servers will
+	// store their state (VPN certificates, users).
+	BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"`
+
+	// The `dhParamsBits` size used for the creation of the .pem file that will be
+	// used in the dh openvpn server.conf file.
+	DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"`
+
+	// The size of the disk in GB for each VPN server. Example: entering `50` will
+	// create disks of 50 GB.
+	DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"`
+
+	// Overrides IAM user name for the VPN. Default is to use the cluster name.
+	IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
+
+	// The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+	// nomenclature. Example: `t3.micro`.
+	InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
+
+	// The number of VPN server instances to create, `0` to skip the creation.
+	Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
+
+	// The username of the account to create in the bastion's operating system.
+	OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
+
+	// The port where each OpenVPN server will listen for connections.
+ Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". + Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The ID of the VPC where the VPN server instances will be created, required only + // if `.spec.infrastructure.vpc` is omitted. + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // The network CIDR that will be used to assign IP addresses to the VPN clients + // when connected. + VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + } + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecInfrastructureVpn(plain) return nil } +type SpecInfrastructure struct { + // 
Vpc corresponds to the JSON schema field "vpc".
+	Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"`
+
+	// Vpn corresponds to the JSON schema field "vpn".
+	Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"`
+}
+
+type SpecKubernetesAPIServer struct {
+	// This value defines if the Kubernetes API server will be accessible from the
+	// private subnets. Default is `true`.
+	PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"`
+
+	// The network CIDRs from the private subnets that will be allowed to access the
+	// Kubernetes API server.
+	PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"`
+
+	// This value defines if the Kubernetes API server will be accessible from the
+	// public subnets. Default is `false`.
+	PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"`
+
+	// The network CIDRs from the public subnets that will be allowed to access the
+	// Kubernetes API server.
+	PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"`
+}
+
 // UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - type Plain SpecDistributionModulesDrVelero + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetesAPIServer(plain) return nil } -var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ - "alinux2", - "alinux2023", -} +type SpecKubernetesAwsAuthRole struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) - } - *j = SpecKubernetesNodePoolAmiType(v) - return nil + // Rolearn corresponds to the JSON schema field "rolearn". + Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` + + // Username corresponds to the JSON schema field "username". 
+ Username string `json:"username" yaml:"username" mapstructure:"username"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + } + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } +type SpecKubernetesAwsAuthUser struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Userarn corresponds to the JSON schema field "userarn". + Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` + + // Username corresponds to the JSON schema field "username". + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["hostedZoneId"]; !ok || v == nil { - return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") } - type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", +// Optional additional security configuration for EKS IAM via the `aws-auth` +// configmap. +// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html +type SpecKubernetesAwsAuth struct { + // This optional array defines additional AWS accounts that will be added to the + // `aws-auth` configmap. 
+ AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` + + // This optional array defines additional IAM roles that will be added to the + // `aws-auth` configmap. + Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` + + // This optional array defines additional IAM users that will be added to the + // `aws-auth` configmap. + Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` +} + +type TypesAwsIamRoleNamePrefix string + +type SpecKubernetesLogRetentionDays int + +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecKubernetesLogRetentionDays(v) return nil } -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", +type SpecKubernetesLogsTypesElem string + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + 
"controllerManager", + "scheduler", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecKubernetesLogsTypesElem(v) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +const ( + SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" + SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" + SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" + SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" + SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" +) + +type SpecKubernetesNodePoolGlobalAmiType string + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + +// Port range for the Firewall Rule. +type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { + // From corresponds to the JSON schema field "from". + From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` + + // To corresponds to the JSON schema field "to". + To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["route53"]; !ok || v == nil { - return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", +type TypesAwsIpProtocol string + +type TypesAwsTags map[string]string + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") - } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressCertManager(plain) - return nil +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { + // CidrBlocks corresponds to the JSON schema field "cidrBlocks". 
+ CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` + + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["vpcId"]; !ok || v == nil { - return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in 
SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } +type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { + // The name of the Firewall rule. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the Firewall rule. + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // If `true`, the source will be the security group itself. + Self bool `json:"self" yaml:"self" mapstructure:"self"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. 
+ Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecDistributionModulesIngressExternalDNS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - *j = SpecDistributionModulesIngressExternalDNS(plain) - return nil -} - -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecKubernetesNodePoolInstance + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } -type TypesKubeLabels_1 map[string]string +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { + // The name for the additional Firewall rule Security Group. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the Firewall rule. + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // The source security group ID. 
+ SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecKubernetesNodePoolSize + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } +// Optional additional firewall rules that will be attached to the nodes. +type SpecKubernetesNodePoolAdditionalFirewallRules struct { + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. + CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` + + // Self corresponds to the JSON schema field "self". + Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` + + // SourceSecurityGroupId corresponds to the JSON schema field + // "sourceSecurityGroupId". + SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) } - type Plain SpecDistributionModulesIngressNginxTLSSecret - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } -type TypesKubeTaints []string +type SpecKubernetesNodePoolAmiType string + +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecKubernetesNodePoolAmiType(v) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", +const ( + SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" + SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" +) + +// Configuration for customize the Amazon Machine Image (AMI) for the machines of +// the Node Pool. +// +// The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields +// for using a custom AMI (just with `self-managed` node pool type) or by setting +// the `ami.type` field to one of the official AMIs based on Amazon Linux. +type SpecKubernetesNodePoolAmi struct { + // The ID of the AMI to use for the nodes, must be set toghether with the `owner` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. 
+ Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` + + // The owner of the AMI to use for the nodes, must be set toghether with the `id` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. + Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` + + // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type + // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at + // the same time than `ami.id` and `ami.owner`. + Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` +} + +type SpecKubernetesNodePoolContainerRuntime string + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", +const ( + SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" + SpecKubernetesNodePoolContainerRuntimeContainerd 
SpecKubernetesNodePoolContainerRuntime = "containerd" +) + +type SpecKubernetesNodePoolInstanceVolumeType string + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } +const ( + SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" + SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" + SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" + SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" +) + +// Configuration for the instances that will be used in the node pool. +type SpecKubernetesNodePoolInstance struct { + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. 
+ // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` + + // If `true`, the nodes will be created as spot instances. Default is `false`. + Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` + + // The instance type to use for the nodes. + Type string `json:"type" yaml:"type" mapstructure:"type"` + + // The size of the disk in GB. + VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` + + // Volume type for the instance disk. Default is `gp2`. + VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } +type TypesKubeLabels_1 map[string]string + +type SpecKubernetesNodePoolSize struct { + // The maximum number of nodes in the node pool. 
+ Max int `json:"max" yaml:"max" mapstructure:"max"` + + // The minimum number of nodes in the node pool. + Min int `json:"min" yaml:"min" mapstructure:"min"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePool: required") + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecKubernetesNodePool + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} +type TypesAwsSubnetId string -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", +type TypesKubeTaints []string + +type SpecKubernetesNodePoolType string + +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecKubernetesNodePoolType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil +const ( + SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" + SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" +) + +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. +type SpecKubernetesNodePool struct { + // AdditionalFirewallRules corresponds to the JSON schema field + // "additionalFirewallRules". + AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` + + // Ami corresponds to the JSON schema field "ami". 
+ Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` + + // This optional array defines additional target groups to attach to the instances + // in the node pool. + AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` + + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. + ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` + + // Instance corresponds to the JSON schema field "instance". + Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` + + // Kubernetes labels that will be added to the nodes. + Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` + + // The name of the node pool. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Size corresponds to the JSON schema field "size". + Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` + + // Optional list of subnet IDs where to create the nodes. + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // AWS tags that will be added to the ASG and EC2 instances. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Kubernetes taints that will be added to the nodes. + Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` + + // The type of Node Pool, can be `self-managed` for using customization like + // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from + // Amazon via the `ami.type` field. It is recommended to use `self-managed`. 
+ Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") + } + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + return fmt.Errorf("field type in SpecKubernetesNodePool: required") } - type Plain SpecDistributionModulesIngressNginx + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginx(plain) + *j = SpecKubernetesNodePool(plain) return nil } +type SpecKubernetesNodePoolsLaunchKind string + +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["certManager"]; !ok || v == nil { - return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") - } - if v, ok := raw["externalDns"]; !ok || v == nil { - return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesIngress(plain) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } -type TypesKubeLabels map[string]string +const ( + SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" + SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" + SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" +) + +// Defines the Kubernetes components configuration and the values needed for the +// `kubernetes` phase of furyctl. +type SpecKubernetes struct { + // ApiServer corresponds to the JSON schema field "apiServer". 
+ ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` + + // AwsAuth corresponds to the JSON schema field "awsAuth". + AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` + + // Overrides the default prefix for the IAM role name of the EKS cluster. If not + // set, a name will be generated from the cluster name. + ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` + + // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. + // Setting the value to zero (`0`) makes retention last forever. Default is `90` + // days. + LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` + + // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. + LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` + + // The SSH public key that can connect to the nodes via SSH using the `ec2-user` + // user. Example: the contents of your `~/.ssh/id_ras.pub` file. + NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` + + // Global default AMI type used for EKS worker nodes. This will apply to all node + // pools unless overridden by a specific node pool. + NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` + + // NodePools corresponds to the JSON schema field "nodePools". 
+ NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` + + // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`, for adopting an existing cluster you'll + // need to migrate from `launch_configurations` to `launch_templates` using `both` + // as interim. + NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` + + // This value defines the network CIDR that will be used to assign IP addresses to + // Kubernetes services. + ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` + + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the subnet where the EKS cluster will be created. + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the VPC where the EKS cluster and its related resources will be created. + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // Overrides the default prefix for the IAM role name of the EKS workers. If not + // set, a name will be generated from the cluster name. + WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` +} // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { @@ -4510,43 +4039,12 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") - } - type Plain SpecDistributionModulesLoggingCustomOutputs - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) - return nil +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -4570,89 +4068,79 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". + Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) - } - *j = SpecDistributionModulesLoggingLokiBackend(v) - return nil +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // Repositories corresponds to the JSON schema field "repositories". 
+ Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") - } - type Plain SpecDistributionModulesLoggingLoki - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingLoki(plain) - return nil +type TypesAwsS3KeyPrefix string + +// Configuration for the S3 bucket used to store the Terraform state. +type SpecToolsConfigurationTerraformStateS3 struct { + // This value defines which bucket will be used to store all the states. + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // This value defines which folder will be used to store all the states inside the + // bucket. + KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` + + // This value defines in which region the bucket is located. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This value defines if the region of the bucket should be validated or not by + // Terraform, useful when using a bucket in a recently added region. + SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -4665,40 +4153,26 @@ func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") - } - type Plain SpecToolsConfigurationTerraformStateS3 - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecToolsConfigurationTerraformStateS3(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") } - *j = SpecDistributionModulesLoggingType(v) + type Plain SpecToolsConfigurationTerraformStateS3 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } +// Configuration for storing the Terraform state of the cluster. +type SpecToolsConfigurationTerraformState struct { + // S3 corresponds to the JSON schema field "s3". + S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4717,11 +4191,9 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +type SpecToolsConfigurationTerraform struct { + // State corresponds to the JSON schema field "state". + State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } // UnmarshalJSON implements json.Unmarshaler. @@ -4742,9 +4214,9 @@ func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", +type SpecToolsConfiguration struct { + // Terraform corresponds to the JSON schema field "terraform". + Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` } // UnmarshalJSON implements json.Unmarshaler. @@ -4765,24 +4237,34 @@ func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". 
+ Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Infrastructure corresponds to the JSON schema field "infrastructure". + Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` + + // Kubernetes corresponds to the JSON schema field "kubernetes". + Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` + + // Defines in which AWS region the cluster and all the related resources will be + // created. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This map defines which will be the common tags that will be added to all the + // resources created on AWS. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Configuration for tools used by furyctl, like Terraform. + ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } // UnmarshalJSON implements json.Unmarshaler. @@ -4818,103 +4300,99 @@ func (j *Spec) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") - } - type Plain TypesKubeToleration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = TypesKubeToleration(plain) - return nil -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) - } - *j = TypesKubeTolerationOperator(v) - return nil -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} -type TypesKubeTolerationOperator string +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) +// override 
default routes for KFD components +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". + IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` + + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". 
+ LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` + + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". + MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". + MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` + + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". + TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` +} + +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. 
+type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. + // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` + + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` + + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. It's critical that secret keys are random, + // and stored safely. + // + // To generate a key, run the following command: `head -c32 /dev/urandom | base64` + SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` + + // Signing Key is the base64 representation of one or more PEM-encoded private + // keys used to sign a user's attestation JWT, which can be consumed by upstream + // applications to pass along identifying user information like username, id, and + // groups. + // + // To generates an P-256 (ES256) signing key: + // + // ```bash + // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem + // # careful! this will output your private key in terminal + // cat ec_private.pem | base64 + // ``` + SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -5058,13 +4536,26 @@ type TypesFuryModuleComponentOverrides_1 struct { Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} +type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} -type TypesKubeTolerationEffect string +// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. +type SpecDistributionModulesAuthPomerium_2 struct { + // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". + DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // DEPRECATED: Use defaultRoutesPolicy and/or routes + Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` + + // Additional routes configuration for Pomerium. Follows Pomerium's route format: + // https://www.pomerium.com/docs/reference/routes + Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + + // Secrets corresponds to the JSON schema field "secrets". + Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +} // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { @@ -5084,25 +4575,7 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil -} +type TypesAwsSshPubKey string type TypesEnvRef string @@ -5116,23 +4589,7 @@ type TypesSshPubKey string type TypesUri string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") - } - type Plain SpecDistributionCommonProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCommonProvider(plain) - return nil -} +type EksclusterKfdV1Alpha2Kind string var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ "EKSCluster", @@ -5158,30 +4615,21 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeNodeSelector map[string]string +const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *Metadata) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in Metadata: required") - } - type Plain Metadata - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.Name) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "name", 1) - } - if len(plain.Name) > 56 { - return fmt.Errorf("field %s length: must be <= %d", "name", 56) - } - *j = Metadata(plain) - return nil +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). +type EksclusterKfdV1Alpha2 struct { + // ApiVersion corresponds to the JSON schema field "apiVersion". + ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` + + // Kind corresponds to the JSON schema field "kind". + Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` + + // Metadata corresponds to the JSON schema field "metadata". + Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` + + // Spec corresponds to the JSON schema field "spec". + Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` } // UnmarshalJSON implements json.Unmarshaler. diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 665c9645d..26863b6e2 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -1872,6 +1872,7 @@ const ( SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" ) +// Configuration for the instances that will be used in the node pool. type SpecKubernetesNodePoolInstance struct { // Set the maximum pods per node to a custom value. If not set will use EKS // default value that depends on the instance type. 
@@ -2055,120 +2056,459 @@ const ( TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" ) +type TypesAwsS3BucketName string + +type TypesAwsS3BucketNamePrefix string + +type TypesAwsS3KeyPrefix string + +type TypesAwsSshPubKey string + +type TypesAwsSubnetId string + +type TypesAwsTags map[string]string + +type TypesAwsVpcId string + +type TypesCidr string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module. 
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". + NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. 
+ IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesIpAddress string + +type TypesKubeLabels map[string]string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeNodeSelector map[string]string + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeTaints []string + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +type TypesKubeTolerationEffect string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" +) + +type TypesKubeTolerationEffect_1 string + +const ( + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" +) + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +type TypesKubeTolerationOperator_1 string + +const ( + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". 
+ Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + +type TypesUri string + +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + 
"none", + "gatekeeper", + "kyverno", +} + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecDistributionModulesDr var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesDr(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + type Plain SpecDistributionModulesDrVelero + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesDrVelero(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") } - type Plain SpecDistributionModulesTracing + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesDrVeleroEks(plain) return nil } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - 
"gatekeeper", - "kyverno", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2197,1313 +2537,1183 @@ func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") - } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistribution(plain) - return nil -} - -type TypesCidr string - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err 
} - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecInfrastructureVpcNetwork + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecInfrastructureVpc - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesTracingType(v) return nil } -type TypesAwsS3BucketNamePrefix string - -type TypesTcpPort int - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecInfrastructureVpnSsh + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } -type TypesAwsVpcId string - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecInfrastructureVpn + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - *j = SpecDistributionModulesMonitoringType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecKubernetesAPIServer + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = 
SpecKubernetesNodePoolSize(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") } - type Plain SpecKubernetesAwsAuthUser + type Plain SpecDistributionModulesIngressNginxTLSSecret var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthUser(plain) + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesDrType(v) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecKubernetesNodePoolType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesLoggingOpensearch + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") } - type Plain SpecDistributionModulesLoggingLoki + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") + } + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool var plain Plain if err := json.Unmarshal(b, 
&plain); err != nil { return err } - *j = SpecDistributionModulesLoggingLoki(plain) + *j = SpecKubernetesNodePool(plain) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ - "alinux2", - "alinux2023", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - *j = SpecKubernetesNodePoolGlobalAmiType(v) + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + "launch_templates", + "both", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecKubernetesNodePoolsLaunchKind(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = 
SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecDistributionModulesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = 
SpecDistributionModulesAuth(plain) return nil } -type TypesAwsTags map[string]string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - type Plain SpecDistributionModulesIngressNginx + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + } + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") + } + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + } + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = 
SpecDistributionModulesIngressNginx(plain) + *j = SpecKubernetes(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesIngressNginxType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecPluginsHelmReleasesElemSetElem var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecPluginsHelmReleasesElemSetElem(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesIngressNginxTLS + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err 
!= nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + } + type Plain SpecToolsConfigurationTerraformStateS3 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecToolsConfigurationTerraformState var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecToolsConfigurationTerraformState(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + *j = SpecKubernetesNodePoolAmiType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecToolsConfigurationTerraform(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + } + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecToolsConfiguration(plain) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ - "alinux2", - "alinux2023", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecKubernetesNodePoolAmiType(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *Spec) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - type Plain SpecDistributionModulesDr + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = 
SpecDistributionModulesDr(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecDistributionModulesDrVelero + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) - return nil -} - -const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" - -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) - } - *j = SpecKubernetesNodePoolContainerRuntime(v) - return nil -} - -const ( - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" -) - -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) - } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = TypesKubeToleration(plain) return nil } -const ( - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecKubernetesNodePoolInstance + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -type TypesAwsS3BucketName string - -type TypesKubeLabels_1 map[string]string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - type Plain SpecKubernetesNodePoolSize - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { + *j = SpecKubernetesLogsTypesElem(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) return nil } -type TypesAwsSubnetId string - -type TypesKubeTaints []string +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { @@ -3525,126 +3735,64 @@ func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { - var v string +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecKubernetesNodePoolType(v) + *j = SpecKubernetesLogRetentionDays(v) return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". 
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") - } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePool: required") + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecKubernetesNodePool + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePool(plain) + *j = SpecDistributionModulesLogging(plain) return nil } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string 
`json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". 
- IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string @@ -3717,56 +3865,52 @@ func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") } - type Plain 
SpecKubernetes + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetes(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecPluginsHelmReleasesElemSetElem - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecPluginsHelmReleasesElemSetElem(plain) + *j = TypesKubeTolerationEffect_1(v) return nil } @@ -3829,14 +3973,6 @@ func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { return nil } -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cert-manager module - 
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string @@ -3857,7 +3993,10 @@ func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON return nil } -type TypesAwsS3KeyPrefix string +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} // UnmarshalJSON implements json.Unmarshaler. func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { @@ -3880,50 +4019,44 @@ func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecInfrastructureVpn var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = SpecInfrastructureVpn(plain) return nil } -var 
enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecToolsConfigurationTerraformState + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } @@ -3952,43 +4085,53 @@ func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } - type Plain SpecToolsConfigurationTerraform + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration_1: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraform(plain) + *j = TypesKubeToleration_1(plain) return nil } -type TypesKubeLabels map[string]string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - type Plain SpecToolsConfiguration + type Plain SpecInfrastructureVpc var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfiguration(plain) + *j = SpecInfrastructureVpc(plain) return nil } +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string @@ -4010,44 +4153,23 @@ func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJ } // UnmarshalJSON implements json.Unmarshaler. -func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - type Plain Spec + type Plain SpecDistributionModulesAuthPomerium_2 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) + *j = SpecDistributionModulesAuthPomerium_2(plain) return nil } -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. 
func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4090,11 +4212,6 @@ func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4117,186 +4234,106 @@ func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") - } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecDistribution(plain) return nil } -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeTolerationEffect_1 string - -var 
enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = TypesKubeTolerationEffect_1(v) + *j = SpecDistributionModulesPolicyType(v) return nil } -const ( - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" -) - -type TypesKubeTolerationOperator_1 string - -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - *j = TypesKubeTolerationOperator_1(v) + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") + } + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModules(plain) return nil } -const ( - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain TypesKubeToleration_1 + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". - Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - -type TypesKubeTolerationEffect string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + type Plain SpecDistributionCommonProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = SpecDistributionCommonProvider(plain) return nil } -type TypesAwsSshPubKey string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesIpAddress string - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesUri string - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4315,10 +4352,6 @@ func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { return nil } -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { var v string @@ -4339,7 +4372,25 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeNodeSelector map[string]string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 56ce77f03..950900e65 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -995,681 +995,569 @@ const ( SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". - BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` - - // Grafana corresponds to the JSON schema field "grafana". 
- Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` - - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` - - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. - // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. 
- // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` -} - -type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io - DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - - // If true, the default rules will be installed - InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - - // The slack webhook url to send alerts - SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` -} - -type SpecDistributionModulesMonitoringBlackboxExporter struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesMonitoringGrafana struct { - // Setting this to true will deploy an additional `grafana-basic-auth` ingress - // protected with Grafana's basic auth instead of SSO. It's intended use is as a - // temporary ingress for when there are problems with the SSO login flow. - // - // Notice that by default anonymous access is enabled. - BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's - // role. Example: - // - // ```yaml - // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || - // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && - // 'Viewer' - // ``` - // - // More details in [Grafana's - // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). - UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` -} - -type SpecDistributionModulesMonitoringKubeStateMetrics struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesMonitoringMimirBackend string - -const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" - SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" -) - -type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external mimir backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external mimir backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external mimir backend 
will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external mimir backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesMonitoringMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). 
- RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the K8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". 
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". 
- TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of networking to use, either ***none***, ***calico*** or ***cilium*** - Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesNetworkingCilium struct { - // MaskSize corresponds to the JSON schema field "maskSize". - MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // PodCidr corresponds to the JSON schema field "podCidr". - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" - SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" -) - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". 
- Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil } -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". 
- Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil } -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil } -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil } -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil } -type SpecDistributionModulesTracingTempoBackend string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil +} -const SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["maskSize"]; !ok || v == nil { - return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["podCidr"]; !ok || v == nil { - return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - type Plain SpecDistributionModulesNetworkingCilium + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworkingCilium(plain) + *j = SpecDistributionModulesIngress(plain) return nil } // UnmarshalJSON implements 
json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesDrVeleroBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil +} + +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesLoggingLoki - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecDistributionModulesLoggingLoki(plain) + *j = SpecDistributionModulesDrType(v) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". 
- Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", +} - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory request for the opensearch pods + // The memory limit for the Pod. Example: `1G`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory limit for the opensearch pods + // The memory request for the Pod. Example: `500M`. 
Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. 
+ DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["provider"]; !ok || v == 
nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionModulesAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") } - *j = SpecDistributionModulesIngressNginxType(v) + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type 
Plain SpecDistributionModulesIngressNginxTLSSecret + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecDistributionModulesAuthDex(plain) return nil } @@ -1681,89 +1569,94 @@ var enumValues_SpecDistributionModulesLoggingType = []interface{}{ } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecDistributionModulesLoggingType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +const SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesMonitoring + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } -type TypesCidr string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesIngressCertManager - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) return nil } -type SpecDistributionModulesTracingType string +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecDistributionModulesLogging(plain) return nil } @@ -1823,22 +1716,22 @@ var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } @@ -1949,27 +1842,28 @@ type SpecDistributionModulesMonitoringType string var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ "none", - "calico", - "cilium", + "prometheus", + "prometheusAgent", + "mimir", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionModulesNetworkingType(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } @@ -2038,46 +1932,112 @@ type SpecDistributionModulesMonitoring struct { } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesDr + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } +type TypesCidr string + +type SpecDistributionModulesNetworkingCilium struct { + // The mask size to use for the Pods network on each node. + MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Allows specifing a CIDR for the Pods network different from + // `.spec.kubernetes.podCidr`. If not set the default is to use + // `.spec.kubernetes.podCidr`. + PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["maskSize"]; !ok || v == nil { + return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") + } + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") + } + type Plain SpecDistributionModulesNetworkingCilium + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesNetworkingCilium(plain) + return nil +} + +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesNetworkingType string + +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", + "calico", + "cilium", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesDrVeleroBackend(v) + *j = SpecDistributionModulesNetworkingType(v) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +const ( + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) + +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or + // `cilium`. 
+ Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. @@ -2098,25 +2058,7 @@ func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) - } - *j = SpecDistributionModulesDrType(v) - return nil -} +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ "deny", @@ -2144,33 +2086,30 @@ func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". 
- Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the security module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress +// Configuration for the Gatekeeper package. +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. 
+ EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. @@ -2194,23 +2133,7 @@ func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil -} +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ "Audit", @@ -2218,67 +2141,45 @@ var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []i } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProvider(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return err } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +// Configuration for the Kyverno package. +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` } // UnmarshalJSON implements json.Unmarshaler. @@ -2302,26 +2203,7 @@ func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) - return nil -} +type SpecDistributionModulesPolicyType string var enumValues_SpecDistributionModulesPolicyType = []interface{}{ "none", @@ -2349,69 +2231,28 @@ func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthOverridesIngress(plain) - return nil -} +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -2432,51 +2273,28 @@ func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil -} +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +// Configuration for Tracing's MinIO deployment. 
+type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } +type SpecDistributionModulesTracingTempoBackend string + var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ "minio", "externalEndpoint", @@ -2502,69 +2320,51 @@ func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) err return nil } -const SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil -} +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) +// Configuration for Tempo's external storage backend. 
type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil -} +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", } // UnmarshalJSON implements json.Unmarshaler. @@ -2592,10 +2392,22 @@ const ( SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" ) -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. @@ -3094,7 +2906,6 @@ var enumValues_TypesKubeTaintsEffect = []interface{}{ "PreferNoSchedule", "NoExecute", } -type TypesEnvRef string // UnmarshalJSON implements json.Unmarshaler. func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { @@ -3156,13 +2967,6 @@ func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { *j = TypesKubeTaints(plain) return nil } -type TypesFileRef string - -type TypesIpAddress string - -type TypesKubeLabels_1 map[string]string - -type TypesKubeTaints []string type TypesSemVer string diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 6731d7450..fc7ec380e 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -1198,430 +1198,110 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -// Configuration for Monitoring's MinIO deployment. -type SpecDistributionModulesMonitoringMinio struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The PVC size for each MinIO disk, 6 disks total. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the default MinIO root user. - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the default MinIO root user. - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the `k8s` Prometheus instance. 
- RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the `k8s` Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the `k8s` Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". 
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -// Configuration for the Networking module. -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. - // Default is `calico`. 
- Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesNetworkingCilium struct { - // The mask size to use for the Pods network on each node. - MaskSize *string `json:"maskSize,omitempty" yaml:"maskSize,omitempty" mapstructure:"maskSize,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Allows specifing a CIDR for the Pods network different from - // `.spec.kubernetes.podCidr`. If not set the default is to use - // `.spec.kubernetes.podCidr`. - PodCidr *TypesCidr `json:"podCidr,omitempty" yaml:"podCidr,omitempty" mapstructure:"podCidr,omitempty"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" -) - -// Configuration for the Policy module. -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of policy enforcement to use, either `none`, `gatekeeper` or - // `kyverno`. - // - // Default is `none`. - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -// Configuration for the Gatekeeper package. -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The default enforcement action to use for the included constraints. `deny` will - // block the admission when violations to the policies are found, `warn` will show - // a message to the user but will admit the violating requests and `dryrun` won't - // give any feedback to the user but it will log the violations. - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // Set to `false` to avoid installing the default Gatekeeper policies (constraints - // templates and constraints) included with the distribution. - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -// Configuration for the Kyverno package. -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the policies on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // Set to `false` to avoid installing the default Kyverno policies included with - // distribution. - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the policies, `Enforce` will block - // when a request does not comply with the policies and `Audit` will not block but - // log when a request does not comply with the policies. 
- ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -// Configuration for the Tracing module. -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either `none` or `tempo`. `none` will disable the - // Tracing module and `tempo` will install a Grafana Tempo deployment. - // - // Default is `tempo`. - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -// Configuration for Tracing's MinIO deployment. -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The PVC size for each MinIO disk, 6 disks total. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the default MinIO root user. - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the default MinIO root user. - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -// Configuration for the Tempo package. -type SpecDistributionModulesTracingTempo struct { - // The storage backend type for Tempo. `minio` will use an in-cluster MinIO - // deployment for object storage, `externalEndpoint` can be used to point to an - // external S3-compatible object storage instead of deploying an in-cluster MinIO. - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // Configuration for Tempo's external storage backend. - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the traces stored in Tempo. 
- RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -// Configuration for Tempo's external storage backend. -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key ID (username) for the external S3-compatible bucket. - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external S3-compatible object storage. - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The external S3-compatible endpoint for Tempo's storage. - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, will use HTTP as protocol instead of HTTPS. - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key (password) for the external S3-compatible bucket. - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" -) - -// Defines the Kubernetes components configuration and the values needed for the -// kubernetes phase of furyctl. -type SpecKubernetes struct { - // Advanced corresponds to the JSON schema field "advanced". 
- Advanced *SpecKubernetesAdvanced `json:"advanced,omitempty" yaml:"advanced,omitempty" mapstructure:"advanced,omitempty"` - - // AdvancedAnsible corresponds to the JSON schema field "advancedAnsible". - AdvancedAnsible *SpecKubernetesAdvancedAnsible `json:"advancedAnsible,omitempty" yaml:"advancedAnsible,omitempty" mapstructure:"advancedAnsible,omitempty"` - - // The address for the Kubernetes control plane. Usually a DNS entry pointing to a - // Load Balancer on port 6443. - ControlPlaneAddress string `json:"controlPlaneAddress" yaml:"controlPlaneAddress" mapstructure:"controlPlaneAddress"` - - // The DNS zone of the machines. It will be appended to the name of each host to - // generate the `kubernetes_hostname` in the Ansible inventory file. It is also - // used to calculate etcd's initial cluster value. - DnsZone string `json:"dnsZone" yaml:"dnsZone" mapstructure:"dnsZone"` - - // LoadBalancers corresponds to the JSON schema field "loadBalancers". - LoadBalancers SpecKubernetesLoadBalancers `json:"loadBalancers" yaml:"loadBalancers" mapstructure:"loadBalancers"` - - // Masters corresponds to the JSON schema field "masters". - Masters SpecKubernetesMasters `json:"masters" yaml:"masters" mapstructure:"masters"` - - // Nodes corresponds to the JSON schema field "nodes". - Nodes SpecKubernetesNodes `json:"nodes" yaml:"nodes" mapstructure:"nodes"` - - // The path to the folder where the PKI files for Kubernetes and etcd are stored. - PkiFolder string `json:"pkiFolder" yaml:"pkiFolder" mapstructure:"pkiFolder"` - - // The subnet CIDR to use for the Pods network. - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` - - // Proxy corresponds to the JSON schema field "proxy". - Proxy *SpecKubernetesProxy `json:"proxy,omitempty" yaml:"proxy,omitempty" mapstructure:"proxy,omitempty"` - - // Ssh corresponds to the JSON schema field "ssh". 
- Ssh SpecKubernetesSSH `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - - // The subnet CIDR to use for the Services network. - SvcCidr TypesCidr `json:"svcCidr" yaml:"svcCidr" mapstructure:"svcCidr"` -} - -type SpecKubernetesAdvanced struct { - // AirGap corresponds to the JSON schema field "airGap". - AirGap *SpecKubernetesAdvancedAirGap `json:"airGap,omitempty" yaml:"airGap,omitempty" mapstructure:"airGap,omitempty"` - - // Cloud corresponds to the JSON schema field "cloud". - Cloud *SpecKubernetesAdvancedCloud `json:"cloud,omitempty" yaml:"cloud,omitempty" mapstructure:"cloud,omitempty"` - - // Containerd corresponds to the JSON schema field "containerd". - Containerd *SpecKubernetesAdvancedContainerd `json:"containerd,omitempty" yaml:"containerd,omitempty" mapstructure:"containerd,omitempty"` - - // Encryption corresponds to the JSON schema field "encryption". - Encryption *SpecKubernetesAdvancedEncryption `json:"encryption,omitempty" yaml:"encryption,omitempty" mapstructure:"encryption,omitempty"` - - // Oidc corresponds to the JSON schema field "oidc". - Oidc *SpecKubernetesAdvancedOIDC `json:"oidc,omitempty" yaml:"oidc,omitempty" mapstructure:"oidc,omitempty"` - - // URL of the registry where to pull images from for the Kubernetes phase. - // (Default is registry.sighup.io/fury/on-premises). - Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - - // Users corresponds to the JSON schema field "users". - Users *SpecKubernetesAdvancedUsers `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` -} - -// Advanced configuration for air-gapped installations. Allows setting custom URLs -// where to download the binaries dependencies from and custom .deb and .rpm -// package repositories. -type SpecKubernetesAdvancedAirGap struct { - // URL where to download the `.tar.gz` with containerd from. The `tar.gz` should - // be as the one downloaded from containerd GitHub releases page. 
- ContainerdDownloadUrl *string `json:"containerdDownloadUrl,omitempty" yaml:"containerdDownloadUrl,omitempty" mapstructure:"containerdDownloadUrl,omitempty"` - - // DependenciesOverride corresponds to the JSON schema field - // "dependenciesOverride". - DependenciesOverride *SpecKubernetesAdvancedAirGapDependenciesOverride `json:"dependenciesOverride,omitempty" yaml:"dependenciesOverride,omitempty" mapstructure:"dependenciesOverride,omitempty"` - - // URL to the path where the etcd `tar.gz`s are available. etcd will be downloaded - // from - // `//etcd--linux-.tar.gz` - EtcdDownloadUrl *string `json:"etcdDownloadUrl,omitempty" yaml:"etcdDownloadUrl,omitempty" mapstructure:"etcdDownloadUrl,omitempty"` - - // Checksum for the runc binary. - RuncChecksum *string `json:"runcChecksum,omitempty" yaml:"runcChecksum,omitempty" mapstructure:"runcChecksum,omitempty"` - - // URL where to download the runc binary from. - RuncDownloadUrl *string `json:"runcDownloadUrl,omitempty" yaml:"runcDownloadUrl,omitempty" mapstructure:"runcDownloadUrl,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + } + *j = SpecDistributionModulesTracingType(v) + return nil } -type SpecKubernetesAdvancedAirGapDependenciesOverride struct { - // Apt corresponds to the JSON schema field "apt". - Apt *SpecKubernetesAdvancedAirGapDependenciesOverrideApt `json:"apt,omitempty" yaml:"apt,omitempty" mapstructure:"apt,omitempty"` - - // Yum corresponds to the JSON schema field "yum". 
- Yum *SpecKubernetesAdvancedAirGapDependenciesOverrideYum `json:"yum,omitempty" yaml:"yum,omitempty" mapstructure:"yum,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil } -type SpecKubernetesAdvancedAirGapDependenciesOverrideApt struct { - // URL where to download the GPG key of the Apt repository. Example: - // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` - GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` - - // The GPG key ID of the Apt repository. Example: - // `36A1D7869245C8950F966E92D8576A8BA88D21E9` - GpgKeyId string `json:"gpg_key_id" yaml:"gpg_key_id" mapstructure:"gpg_key_id"` - - // An indicative name for the Apt repository. Example: `k8s-1.29` - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // A source string for the new Apt repository. Example: `deb - // https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /` - Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil } -type SpecKubernetesAdvancedAirGapDependenciesOverrideYum struct { - // URL where to download the ASCII-armored GPG key of the Yum repository. Example: - // `https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key` - GpgKey string `json:"gpg_key" yaml:"gpg_key" mapstructure:"gpg_key"` - - // If true, the GPG signature check on the packages will be enabled. - GpgKeyCheck bool `json:"gpg_key_check" yaml:"gpg_key_check" mapstructure:"gpg_key_check"` - - // An indicative name for the Yum repository. Example: `k8s-1.29` - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // URL to the directory where the Yum repository's `repodata` directory lives. - // Example: `https://pkgs.k8s.io/core:/stable:/v1.29/rpm/` - Repo string `json:"repo" yaml:"repo" mapstructure:"repo"` - - // If true, the GPG signature check on the `repodata` will be enabled. - RepoGpgCheck bool `json:"repo_gpg_check" yaml:"repo_gpg_check" mapstructure:"repo_gpg_check"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type SpecKubernetesAdvancedAnsible struct { - // Additional configuration to append to the ansible.cfg file - Config *string `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` - - // The Python interpreter to use for running Ansible. 
Example: python3 - PythonInterpreter *string `json:"pythonInterpreter,omitempty" yaml:"pythonInterpreter,omitempty" mapstructure:"pythonInterpreter,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
@@ -1718,18 +1398,18 @@ var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ } type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods + // The CPU limit for the Pod. Example: `1000m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory limit for the prometheus pods + // The memory limit for the Pod. Example: `1G`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } type TypesKubeResourcesRequests struct { - // The cpu request for the loki pods + // The CPU request for the Pod, in cores. Example: `500m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory request for the prometheus pods + // The memory request for the Pod. Example: `500M`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } @@ -2405,8 +2085,8 @@ type SpecDistributionModulesNetworking struct { // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - // The type of CNI plugin to use, either `calico` (default, via the Tigera - // Operator) or `cilium`. + // The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. + // Default is `calico`. Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } @@ -2622,6 +2302,8 @@ type SpecDistributionModulesPolicy struct { // The type of policy enforcement to use, either `none`, `gatekeeper` or // `kyverno`. + // + // Default is `none`. Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } @@ -2703,7 +2385,7 @@ type SpecDistributionModulesTracingTempoExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. 
BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Tempo's storage. + // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. @@ -2773,6 +2455,8 @@ type SpecDistributionModulesTracing struct { // The type of tracing to use, either `none` or `tempo`. `none` will disable the // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 072f7e7ba..843e7503f 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -159,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -171,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the 
states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." }, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." } }, "required": [ @@ -200,12 +202,10 @@ "additionalProperties": false, "properties": { "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, "allOf": [ @@ -300,7 +300,7 @@ "properties": { "cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" + "description": "The network CIDR for the VPC that will be created" }, "subnetsCidrs": { "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" @@ -313,6 +313,7 @@ }, "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", "additionalProperties": false, "properties": { "private": { @@ -320,14 +321,14 @@ "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private 
load balancers will be created" + "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, "public": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ @@ -337,31 +338,59 @@ }, "Spec.Infrastructure.Vpn": { "type": "object", + "description": "Configuration for the VPN server instances.", "additionalProperties": false, "properties": { "instances": { "type": "integer", - "description": "The number of instances to create, 0 to skip the creation" + "description": "The number of VPN server instances to create, `0` to skip the creation." }, "port": { "$ref": "#/$defs/Types.TcpPort", - "description": "The port used by the OpenVPN server" + "description": "The port where each OpenVPN server will listen for connections." }, "instanceType": { "type": "string", - "description": "The size of the AWS EC2 instance" + "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3-micro`." }, "diskSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB." }, "operatorName": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + "description": "The username of the account to create in the bastion's operating system." 
+ }, + "dhParamsBits": { + "type": "integer", + "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file." + }, + "vpnClientsSubnetCidr": { + "$ref": "#/$defs/Types.Cidr", + "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected." + }, + "ssh": { + "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" + }, + "vpcId": { + "$ref": "#/$defs/Types.AwsVpcId", + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." + }, + "bucketNamePrefix": { + "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." + }, + "iamUserNameOverride": { + "$ref": "#/$defs/Types.AwsIamRoleName", + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } - } + }, + "required": [ + "ssh", + "vpnClientsSubnetCidr" + ] }, - "Spec.Distribution.Common.Provider": { + "Spec.Infrastructure.Vpn.Ssh": { "type": "object", "additionalProperties": false, "properties": { @@ -377,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -385,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." 
}, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -407,29 +436,29 @@ "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." }, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created." 
}, "apiServer": { "$ref": "#/$defs/Spec.Kubernetes.APIServer" }, "serviceIpV4Cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." }, "nodeAllowedSshPublicKey": { "anyOf": [ @@ -440,7 +469,7 @@ "$ref": "#/$defs/Types.FileRef" } ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file." }, "nodePoolsLaunchKind": { "type": "string", @@ -449,7 +478,7 @@ "launch_templates", "both" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, "nodePoolGlobalAmiType": { "type": "string", @@ -461,7 +490,32 @@ }, "logRetentionDays": { "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. 
Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] }, "logsTypes": { "type": "array", @@ -501,7 +555,7 @@ "properties": { "privateAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default it `true`." }, "privateAccessCidrs": { "type": "array", @@ -509,7 +563,7 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "description": "The network CIDRs from the private subnets that will be allowed access the Kubernetes API server." }, "publicAccessCidrs": { "type": "array", @@ -517,11 +571,11 @@ "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "description": "The network CIDRs from the public subnets that will be allowed access the Kubernetes API server." }, "publicAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`." } }, "required": [ @@ -532,18 +586,62 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. 
It is recommended to use `self-managed`.", "type": "string", - "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." + "enum": [ + "eks-managed", + "self-managed" + ] + }, + "name": { + "type": "string", + "description": "The name of the node pool." + }, + "ami": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Ami" + }, + "containerRuntime": { + "type": "string", + "enum": [ + "docker", + "containerd" + ], + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." + }, + "size": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" + }, + "instance": { + "$ref": "#/$defs/Spec.Kubernetes.NodePool.Instance" + }, + "attachedTargetGroups": { + "type": "array", + "items": { + "$ref": "#/$defs/Types.AwsArn" + }, + "description": "This optional array defines additional target groups to attach to the instances in the node pool." + }, + "labels": { + "$ref": "#/$defs/Types.KubeLabels", + "description": "Kubernetes labels that will be added to the nodes." + }, + "taints": { + "$ref": "#/$defs/Types.KubeTaints", + "description": "Kubernetes taints that will be added to the nodes." + }, + "tags": { + "$ref": "#/$defs/Types.AwsTags", + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." 
}, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -653,21 +751,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." }, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -676,7 +776,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -690,12 +791,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." 
} }, "required": [ @@ -706,6 +807,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -713,7 +815,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." }, "sourceSecurityGroupId": { "type": "array", @@ -740,13 +843,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -776,7 +881,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -784,19 +889,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." 
}, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -816,7 +921,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -824,19 +929,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -869,6 +974,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -876,21 +982,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." 
}, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." } } }, @@ -1027,28 +1133,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. 
(Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -1058,7 +1165,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." } }, "required": [ @@ -1115,6 +1222,10 @@ "type": "string", "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, + "nginx": { + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", + "description": "Configurations for the Ingress nginx controller package." + }, "certManager": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." @@ -1135,140 +1246,82 @@ "baseDomain", "nginx" ], - "then": { - "required": [ - "certManager" - ] - }, - "type": "object" - }, - "Spec.Distribution.Modules.Ingress.CertManager": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for the cert-manager package. 
Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", - "properties": { - "clusterIssuer": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - }, - "required": [ - "clusterIssuer" - ] - }, - "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { - "additionalProperties": false, - "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", - "oneOf": [ + "allOf": [ { - "required": [ - "type" - ] + "if": { + "properties": { + "nginx": { + "properties": { + "type": { + "const": "dual" + } + } + } + } + }, + "then": { + "required": [ + "dns" + ], + "properties": { + "dns": { + "required": [ + "public", + "private" + ] + } + } + } }, { - "required": [ - "solvers" - ] - } - ], - "properties": { - "email": { - "type": "string", - "format": "email", - "description": "The email address to use during the certificate issuing process." - }, - "name": { - "type": "string", - "description": "The name of the clusterIssuer." - }, - "route53": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" - }, - "solvers": { - "type": "array", - "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." - }, - "type": { - "type": "string", - "enum": [ - "dns01", - "http01" - ], - "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." 
- } - }, - "required": [ - "route53", - "name", - "email" - ], - "type": "object" - }, - "Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53": { - "type": "object", - "additionalProperties": false, - "properties": { - "iamRoleArn": { - "$ref": "#/$defs/Types.AwsArn" - }, - "region": { - "$ref": "#/$defs/Types.AwsRegion" - }, - "hostedZoneId": { - "type": "string" - } - }, - "required": [ - "hostedZoneId", - "iamRoleArn", - "region" - ] - }, - "Spec.Distribution.Modules.Ingress.DNS": { - "type": "object", - "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", - "additionalProperties": false, - "properties": { - "public": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Public" - }, - "private": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS.Private" + "if": { + "properties": { + "nginx": { + "properties": { + "type": { + "const": "single" + } + } + } + } + }, + "then": { + "required": [ + "dns" + ], + "properties": { + "dns": { + "required": [ + "public" + ] + } + } + } }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + { + "if": { + "properties": { + "nginx": { + "properties": { + "tls": { + "properties": { + "provider": { + "const": "certManager" + } + } + } + } + } + } + }, + "then": { + "required": [ + "certManager" + ] + } } - }, - "required": [ - "public", - "private" ] }, - "Spec.Distribution.Modules.Ingress.DNS.Private": { - "additionalProperties": false, - "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", - "properties": { - "create": { - "type": "boolean", - "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." - }, - "name": { - "type": "string", - "description": "The name of the private hosted zone. 
Example: `internal.fury-demo.sighup.io`." - }, - "vpcId": { - "type": "string" - } - }, - "required": [ - "vpcId", - "name", - "create" - ], - "type": "object" - }, "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, @@ -1279,14 +1332,14 @@ }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -1392,6 +1445,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1407,15 +1461,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." 
}, "type": { "type": "string", @@ -1423,11 +1478,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." }, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." }, "route53": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" @@ -1473,11 +1528,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -1487,15 +1542,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." 
}, "vpcId": { "type": "string" @@ -1602,14 +1658,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1636,7 +1692,7 @@ "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1644,11 +1700,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1713,6 +1769,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1720,41 +1777,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1827,15 +1884,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." 
}, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1866,20 +1923,17 @@ "type": "object", "additionalProperties": false, "properties": { - "resources": { - "$ref": "#/$defs/Types.KubeResources" - }, - "retentionTime": { + "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, - "storageSize": { + "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -2018,7 +2072,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." 
}, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -2034,10 +2088,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -2045,31 +2100,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, @@ -2085,7 +2141,7 @@ "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -2093,11 +2149,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -2262,6 +2318,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2272,7 +2329,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2304,149 +2361,50 @@ "additionalProperties": false, "description": "Configuration for Velero's backup schedules.", "properties": { - "username": { - "type": "string", - "description": "The username for the default MinIO root user." - }, - "password": { - "type": "string", - "description": "The password for the default MinIO root user." - } - } - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Distribution.Modules.Tracing.Tempo": { - "type": "object", - "additionalProperties": false, - "description": "Configuration for the Tempo package.", - "properties": { - "retentionTime": { - "type": "string", - "description": "The retention time for the traces stored in Tempo." 
- }, - "backend": { - "type": "string", - "enum": [ - "minio", - "externalEndpoint" - ], - "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." - }, - "externalEndpoint": { - "description": "Configuration for Tempo's external storage backend.", - "type": "object", - "additionalProperties": false, - "properties": { - "endpoint": { - "type": "string", - "description": "The external S3-compatible endpoint for Tempo's storage." - }, - "insecure": { + "install": { "type": "boolean", - "description": "If true, will use HTTP as protocol instead of HTTPS." + "description": "Whether to install or not the default `manifests` and `full` backups schedules. Default is `true`." }, - "secretAccessKey": { - "type": "string", - "description": "The secret access key (password) for the external S3-compatible bucket." - }, - "accessKeyId": { - "type": "string", - "description": "The access key ID (username) for the external S3-compatible bucket." - }, - "bucketName": { - "type": "string", - "description": "The bucket name of the external S3-compatible object storage." 
- } - } - }, - "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" - } - } - }, - "Spec.Infrastructure": { - "type": "object", - "additionalProperties": false, - "properties": { - "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc" - }, - "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn" - } - }, - "allOf": [ - { - "if": { - "allOf": [ - { - "properties": { - "vpc": { - "type": "null" - } - } - }, - { - "not": { - "properties": { - "vpn": { - "type": "null" - } - } - } - } - ] - }, - "then": { - "properties": { - "vpn": { - "required": [ - "vpcId" - ] - } - } - } - }, - { - "if": { - "allOf": [ - { - "not": { + "definitions": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero schedules.", + "properties": { + "manifests": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", "properties": { - "vpc": { - "type": "null" + "schedule": { + "type": "string", + "description": "The cron expression for the `manifests` backup schedule (default `*/15 * * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." } } - } - }, - { - "not": { + }, + "full": { + "type": "object", + "additionalProperties": false, + "description": "Configuration for Velero's manifests backup schedule.", "properties": { - "vpn": { - "properties": { - "vpcId": { - "type": "null" - } - } + "schedule": { + "type": "string", + "description": "The cron expression for the `full` backup schedule (default `0 1 * * *`)." + }, + "ttl": { + "type": "string", + "description": "The Time To Live (TTL) of the backups created by the backup schedules (default `720h0m0s`, 30 days). 
Notice that changing this value will affect only newly created backups, prior backups will keep the old TTL." + }, + "snapshotMoveData": { + "type": "boolean", + "description": "EXPERIMENTAL (if you do more than one backups, the following backups after the first are not automatically restorable, see https://github.com/vmware-tanzu/velero/issues/7057#issuecomment-2466815898 for the manual restore solution): SnapshotMoveData specifies whether snapshot data should be moved. Velero will create a new volume from the snapshot and upload the content to the storageLocation." } } } } - ] - }, - "then": { - "properties": { - "vpn": { - "properties": { - "vpcId": { - "type": "null" - } - } - } } } }, @@ -2467,35 +2425,15 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, - "subnetsCidrs": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" - } - }, - "required": [ - "cidr", - "subnetsCidrs" - ] - }, - "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { - "type": "object", - "description": "Network CIDRS configuration for private and public subnets.", - "additionalProperties": false, - "properties": { - "private": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "bucketName": { + "$ref": "#/$defs/Types.AwsS3BucketName", + "maxLength": 49, + "description": "The name of the bucket for Velero." 
}, - "public": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.Cidr" - }, - "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "iamRoleArn": { + "$ref": "#/$defs/Types.AwsArn" } }, "required": [ @@ -2517,7 +2455,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2542,113 +2480,22 @@ } } }, - "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." - } - }, - "required": [ - "allowedFromCidrs", - "githubUsersName" - ] - }, - "Spec.Kubernetes": { - "type": "object", - "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", - "additionalProperties": false, - "properties": { - "vpcId": { - "$ref": "#/$defs/Types.AwsVpcId", - "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." - }, - "clusterIAMRoleNamePrefixOverride": { - "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." - }, - "workersIAMRoleNamePrefixOverride": { - "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." 
- }, - "subnetIds": { - "type": "array", - "items": { - "$ref": "#/$defs/Types.AwsSubnetId" - }, - "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the subnet where the EKS cluster will be created." - }, - "apiServer": { - "$ref": "#/$defs/Spec.Kubernetes.APIServer" - }, - "serviceIpV4Cidr": { - "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." - }, - "nodeAllowedSshPublicKey": { - "anyOf": [ - { - "$ref": "#/$defs/Types.AwsSshPubKey" - }, - { - "$ref": "#/$defs/Types.FileRef" - } - ], - "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file." - }, - "nodePoolsLaunchKind": { - "type": "string", - "enum": [ - "launch_configurations", - "launch_templates", - "both" - ], - "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." - }, - "logRetentionDays": { - "type": "integer", - "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.", - "enum": [ - 0, - 1, - 3, - 5, - 7, - 14, - 30, - 60, - 90, - 120, - 150, - 180, - 365, - 400, - 545, - 731, - 1096, - 1827, - 2192, - 2557, - 2922, - 3288, - 3653 - ] - }, - "logsTypes": { - "type": "array", - "items": { - "type": "string", - "enum": [ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler" + "then": { + "required": [ + "dex", + "pomerium", + "baseDomain" ] }, - "minItems": 0, - "description": "Optional list of Kubernetes Cluster log types to enable. Defaults to all types." 
- }, - "nodePools": { - "type": "array", - "items": { - "$ref": "#/$defs/Spec.Kubernetes.NodePool" + "else": { + "properties": { + "dex": { + "type": "null" + }, + "pomerium": { + "type": "null" + } + } } }, { @@ -2687,10 +2534,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2700,10 +2548,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2716,11 +2565,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
} }, "required": [ @@ -2739,7 +2588,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2756,11 +2605,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -2774,14 +2623,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients defitions that will be added to the default clients included with the distribution in Dex's configuration. 
Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -3063,40 +2913,6 @@ ] }, "Types.KubeResources": { - "type": "object", - "additionalProperties": false, - "properties": { - "requests": { - "type": "object", - "additionalProperties": false, - "properties": { - "cpu": { - "type": "string", - "description": "The cpu request for the prometheus pods" - }, - "memory": { - "type": "string", - "description": "The memory request for the opensearch pods" - } - } - }, - "limits": { - "type": "object", - "additionalProperties": false, - "properties": { - "cpu": { - "type": "string", - "description": "The cpu limit for the opensearch pods" - }, - "memory": { - "type": "string", - "description": "The memory limit for the opensearch pods" - } - } - } - } - }, - "Types.FuryModuleOverrides": { "type": "object", "additionalProperties": false, "properties": { @@ -3137,7 +2953,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -3147,7 +2963,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." 
}, "ingresses": { "type": "object", @@ -3163,7 +2979,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -3173,7 +2989,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -3183,7 +2999,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -3193,7 +3009,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." }, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -3214,7 +3030,7 @@ }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
} } }, From bbc3169c295ddc82a1ee672976ab2a9d771418f8 Mon Sep 17 00:00:00 2001 From: Giuseppe Iannelli Date: Mon, 25 Nov 2024 17:31:51 +0100 Subject: [PATCH 121/160] chore: bump eks-installer to v3.2.0 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 64dc65e64..41dd9ce23 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -16,7 +16,7 @@ modules: kubernetes: eks: version: 1.30 - installer: v3.2.0-rc1 + installer: v3.2.0 onpremises: version: 1.30.6 installer: v1.30.6-rc.2 From 378607de1dab798cb70917855d71d94348bfbaef Mon Sep 17 00:00:00 2001 From: Stefano Ghinelli Date: Tue, 26 Nov 2024 08:49:21 +0100 Subject: [PATCH 122/160] fix(network-policies): add missing label on pomerium policies --- templates/distribution/manifests/auth/policies/common.yaml.tpl | 1 + 1 file changed, 1 insertion(+) diff --git a/templates/distribution/manifests/auth/policies/common.yaml.tpl b/templates/distribution/manifests/auth/policies/common.yaml.tpl index dfe83bd10..1b8300e14 100644 --- a/templates/distribution/manifests/auth/policies/common.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/common.yaml.tpl @@ -23,6 +23,7 @@ metadata: name: all-egress-kube-dns namespace: pomerium labels: + cluster.kfd.sighup.io/module: auth cluster.kfd.sighup.io/auth-provider-type: sso spec: podSelector: From ba251ee147becff33dffbd6ec5b84b38ae40b924 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Tue, 26 Nov 2024 12:34:09 +0100 Subject: [PATCH 123/160] fix(netpols): minio-monitoring ingress and opensearch jobs - Add netpol for allowing traffic from ingresses to Minio Monitoring in single, dual and SSO. 
- Use new labels for OpenSearch and OpenSearch-Dashboard job pods - Update readme with this changes --- .../modules/logging/README.md | 9 ++- .../manifests/auth/policies/pomerium.yaml.tpl | 4 ++ .../policies/opensearch-dashboards.yaml.tpl | 32 ++++++++-- .../logging/policies/opensearch.yaml.tpl | 20 ++---- .../monitoring/policies/minio.yaml.tpl | 64 +++++++++++++++++-- 5 files changed, 105 insertions(+), 24 deletions(-) diff --git a/docs/network-policies/modules/logging/README.md b/docs/network-policies/modules/logging/README.md index 41fb930d9..df072997e 100644 --- a/docs/network-policies/modules/logging/README.md +++ b/docs/network-policies/modules/logging/README.md @@ -1,15 +1,18 @@ # Logging Module Network Policies ## Components + - OpenSearch Stack - Loki Stack ## Namespaces + - logging ## Network Policies List ### Common Policies + - deny-all - all-egress-kube-dns - event-tailer-egress-kube-apiserver @@ -20,6 +23,7 @@ - logging-operator-egress-kube-apiserver ### OpenSearch Stack + - fluentd-ingress-fluentbit - fluentd-ingress-prometheus-metrics - opensearch-discovery @@ -31,8 +35,10 @@ - opensearch-dashboards-ingress-nginx - opensearch-dashboards-ingress-jobs - jobs-egress-opensearch +- jobs-egress-opensearch-dashboards ### Loki Stack + - loki-distributed-ingress-fluentd - loki-distributed-ingress-grafana - loki-distributed-ingress-prometheus-metrics @@ -40,6 +46,7 @@ - loki-distributed-egress-all ### MinIO + - minio-ingress-namespace - minio-buckets-setup-egress-kube-apiserver - minio-buckets-setup-egress-minio @@ -48,6 +55,6 @@ - minio-egress-https ## Configurations + - [OpenSearch Stack](opensearch.md) - [Loki Stack](loki.md) - diff --git a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl index c82c7fc4f..d610a905d 100644 --- a/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl +++ b/templates/distribution/manifests/auth/policies/pomerium.yaml.tpl @@ -24,7 
+24,11 @@ spec: kubernetes.io/metadata.name: ingress-nginx podSelector: matchLabels: +{{- if eq .spec.distribution.modules.ingress.nginx.type "dual" }} + app: ingress +{{- else if eq .spec.distribution.modules.ingress.nginx.type "single" }} app: ingress-nginx +{{- end }} ports: - port: 8080 protocol: TCP diff --git a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl index 0b10c7bce..6a8fb98cc 100644 --- a/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/opensearch-dashboards.yaml.tpl @@ -47,9 +47,9 @@ spec: ingress: - from: - podSelector: - matchExpressions: - - key: batch.kubernetes.io/job-name - operator: Exists + matchLabels: + app.kubernetes.io/name: opensearch-dashboards + app.kubernetes.io/instance: opensearch-dashboards ports: - port: 5601 protocol: TCP @@ -91,4 +91,28 @@ spec: - port: 5601 protocol: TCP --- - +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: jobs-egress-opensearch-dashboards + namespace: logging + labels: + cluster.kfd.sighup.io/module: logging + cluster.kfd.sighup.io/logging-type: opensearch +spec: + policyTypes: + - Egress + podSelector: + matchLabels: + app.kubernetes.io/name: opensearch-dashboards + app.kubernetes.io/instance: opensearch-dashboards + egress: + - to: + - podSelector: + matchLabels: + app: opensearch-dashboards + release: opensearch-dashboards + ports: + - port: 5601 + protocol: TCP +--- diff --git a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl index 2ddcd18b2..fccfeae54 100644 --- a/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl +++ b/templates/distribution/manifests/logging/policies/opensearch.yaml.tpl @@ -136,9 +136,9 @@ spec: ingress: - from: - podSelector: - matchExpressions: - 
- key: batch.kubernetes.io/job-name - operator: Exists + matchLabels: + app.kubernetes.io/name: opensearch + app.kubernetes.io/instance: opensearch ports: - port: 9200 protocol: TCP @@ -155,18 +155,10 @@ spec: policyTypes: - Egress podSelector: - matchExpressions: - - key: batch.kubernetes.io/job-name - operator: Exists + matchLabels: + app.kubernetes.io/name: opensearch + app.kubernetes.io/instance: opensearch egress: - - to: - - podSelector: - matchLabels: - app: opensearch-dashboards - release: opensearch-dashboards - ports: - - port: 5601 - protocol: TCP - to: - podSelector: matchLabels: diff --git a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl index 7fcce1a79..2af4eae0e 100644 --- a/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl +++ b/templates/distribution/manifests/monitoring/policies/minio.yaml.tpl @@ -9,7 +9,8 @@ metadata: name: minio-ingress-namespace namespace: monitoring labels: - app: minio + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio spec: policyTypes: - Ingress @@ -43,7 +44,8 @@ metadata: name: minio-buckets-setup-egress-kube-apiserver namespace: monitoring labels: - app: minio-monitoring-buckets-setup + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio spec: policyTypes: - Egress @@ -61,7 +63,8 @@ metadata: name: minio-buckets-setup-egress-minio namespace: monitoring labels: - app: minio-monitoring-buckets-setup + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio spec: policyTypes: - Egress @@ -87,7 +90,8 @@ metadata: name: minio-ingress-prometheus-metrics namespace: monitoring labels: - app: minio + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio spec: policyTypes: - Ingress @@ -109,8 +113,11 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - 
name: minio-monitoring-egress-all + name: minio-monitoring-egress-https namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio spec: policyTypes: - Egress @@ -122,3 +129,50 @@ spec: - port: 443 protocol: TCP --- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: minio-ingress-nginx + namespace: monitoring + labels: + cluster.kfd.sighup.io/module: monitoring + cluster.kfd.sighup.io/monitoring-backend: minio +spec: + policyTypes: + - Ingress + podSelector: + matchLabels: + app: minio + ingress: +# single nginx, no sso +{{ if and (eq .spec.distribution.modules.ingress.nginx.type "single") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress-nginx +# dual nginx, no sso +{{ else if and (eq .spec.distribution.modules.ingress.nginx.type "dual") (ne .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app: ingress +# sso +{{ else if (eq .spec.distribution.modules.auth.provider.type "sso") }} + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: pomerium + podSelector: + matchLabels: + app: pomerium +{{ end }} + ports: + - port: 9001 + protocol: TCP +--- From 0641960106f0393a2778dccd590a6d37923f7187 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Tue, 26 Nov 2024 14:40:18 +0100 Subject: [PATCH 124/160] docs(schemas): improve description for OpenSearch storageSize --- docs/schemas/ekscluster-kfd-v1alpha2.md | 2 +- docs/schemas/kfddistribution-kfd-v1alpha2.md | 2 +- docs/schemas/onpremises-kfd-v1alpha2.md | 2 +- pkg/apis/ekscluster/v1alpha2/private/schema.go | 3 ++- pkg/apis/ekscluster/v1alpha2/public/schema.go | 3 ++- pkg/apis/kfddistribution/v1alpha2/public/schema.go | 3 ++- 
pkg/apis/onpremises/v1alpha2/public/schema.go | 3 ++- schemas/private/ekscluster-kfd-v1alpha2.json | 2 +- schemas/public/ekscluster-kfd-v1alpha2.json | 2 +- schemas/public/kfddistribution-kfd-v1alpha2.json | 2 +- schemas/public/onpremises-kfd-v1alpha2.json | 2 +- 11 files changed, 15 insertions(+), 11 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 453eb6cdf..a165f8cf1 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -2861,7 +2861,7 @@ The memory request for the Pod. Example: `500M`. ### Description -The storage size for the OpenSearch volumes. +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. ## .spec.distribution.modules.logging.opensearch.type diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index 8b950ab96..63db395b1 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -2348,7 +2348,7 @@ The memory request for the Pod. Example: `500M`. ### Description -The storage size for the OpenSearch volumes. +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. ## .spec.distribution.modules.logging.opensearch.type diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index d4c4437c1..9bb0ae9d0 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -2472,7 +2472,7 @@ The memory request for the Pod. Example: `500M`. ### Description -The storage size for the OpenSearch volumes. +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. 
## .spec.distribution.modules.logging.opensearch.type diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index ddcb3ac0b..c99816b68 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -1939,7 +1939,8 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the OpenSearch volumes. + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` // The type of OpenSearch deployment. One of: `single` for a single replica or diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 26863b6e2..ba2f64980 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -1029,7 +1029,8 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the OpenSearch volumes. + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` // The type of OpenSearch deployment. 
One of: `single` for a single replica or diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index 950900e65..e8f0ddf11 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -968,7 +968,8 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the OpenSearch volumes. + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` // The type of OpenSearch deployment. One of: `single` for a single replica or diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index fc7ec380e..3d0b8199b 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -1024,7 +1024,8 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the OpenSearch volumes. + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` // The type of OpenSearch deployment. 
One of: `single` for a single replica or diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 843e7503f..c8e05bcc9 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1665,7 +1665,7 @@ }, "storageSize": { "type": "string", - "description": "The storage size for the OpenSearch volumes." + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index f42c465e4..e9534708c 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1652,7 +1652,7 @@ }, "storageSize": { "type": "string", - "description": "The storage size for the OpenSearch volumes." + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index 382187e1a..c2d0302b7 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -540,7 +540,7 @@ }, "storageSize": { "type": "string", - "description": "The storage size for the OpenSearch volumes." + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index f9852eb91..e49d59cf0 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1112,7 +1112,7 @@ }, "storageSize": { "type": "string", - "description": "The storage size for the OpenSearch volumes." + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" From 3256cbcbe4aed4c7cee3043bcbc9bf760713227d Mon Sep 17 00:00:00 2001 From: Alessio Dionisi Date: Tue, 26 Nov 2024 16:51:33 +0100 Subject: [PATCH 125/160] deps: use go-jsonschema v0.15.3 --- go.mod | 3 ++- go.sum | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2ef42ff82..8fbb402a3 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.23 require ( github.com/Al-Pragliola/go-version v1.6.2 github.com/go-playground/validator/v10 v10.15.5 - github.com/sighupio/go-jsonschema v0.15.2 + github.com/sighupio/go-jsonschema v0.15.3 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc ) @@ -18,4 +18,5 @@ require ( golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index af7697764..905210baa 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sighupio/go-jsonschema v0.15.2 h1:Yt+QeiIwL9LZpYH+LwqiDD08FG8vjoyngrpHmfqPmmE= 
-github.com/sighupio/go-jsonschema v0.15.2/go.mod h1:3KaIPMGHZhUcDq2b+6rEZgkpT5mpstnsu+KnSbuf/R4= +github.com/sighupio/go-jsonschema v0.15.3 h1:q2EtYBbXFRQbRbc9/lkFyg2lmxrJFaa8737dvwm/0bo= +github.com/sighupio/go-jsonschema v0.15.3/go.mod h1:QOHAu5BGlMReCwWJx1Yf7FK+Z5D8TrVVT+SOgInHd5I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -36,6 +36,7 @@ golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 30f75cfb9dcdd01685afe44670438d83ce88cb2a Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Tue, 19 Nov 2024 15:58:24 +0100 Subject: [PATCH 126/160] feat: update e2e testing to include 1.30.0 (still WIP, we also need furyctl 0.30.0-rc.X) --- .drone.yml | 60 +++------- tests/e2e-kfddistribution-upgrades.sh | 20 +--- .../furyctl-init-cluster-1.29.0.yaml | 104 ------------------ .../furyctl-init-cluster-1.29.1.yaml | 104 ------------------ .../furyctl-init-cluster-1.29.2.yaml | 104 ------------------ ....yaml => furyctl-init-cluster-1.30.0.yaml} | 2 +- ...l-10-migrate-from-none-to-safe-values.yaml | 2 +- ...-kyverno-default-policies-to-disabled.yaml | 2 +- ...-from-alertmanagerconfigs-to-disabled.yaml | 2 +- 
.../furyctl-2-migrate-from-tempo-to-none.yaml | 2 +- ...uryctl-3-migrate-from-kyverno-to-none.yaml | 2 +- ...furyctl-4-migrate-from-velero-to-none.yaml | 2 +- .../furyctl-5-migrate-from-loki-to-none.yaml | 2 +- .../furyctl-6-migrate-from-mimir-to-none.yaml | 2 +- ...ryctl-7-migrate-from-basicAuth-to-sso.yaml | 2 +- .../furyctl-8-migrate-from-sso-to-none.yaml | 2 +- .../furyctl-9-migrate-from-nginx-to-none.yaml | 2 +- .../kfddistribution/furyctl-cleanup-all.yaml | 2 +- .../kfddistribution/furyctl-init-cluster.yaml | 2 +- .../furyctl-init-with-values-from-nil.yaml | 2 +- 20 files changed, 37 insertions(+), 385 deletions(-) delete mode 100644 tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml delete mode 100644 tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml delete mode 100644 tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml rename tests/e2e/kfddistribution-upgrades/{furyctl-init-cluster-1.29.3.yaml => furyctl-init-cluster-1.30.0.yaml} (98%) diff --git a/.drone.yml b/.drone.yml index 04c9692c4..49d533b6b 100644 --- a/.drone.yml +++ b/.drone.yml @@ -59,8 +59,7 @@ steps: # - schema-check - name: test-schema - # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 - image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always depends_on: - license-check @@ -69,8 +68,7 @@ steps: - bats -t tests/schema.sh - name: render - # KUBECTL 1.25.3 - KUSTOMIZE 3.5.3 - HELM 3.1.1 - YQ 4.21.1 - ISTIOCTL 1.9.4 - FURYCTL 0.9.0 - BATS 1.1.0 - image: quay.io/sighup/e2e-testing:1.1.0_0.9.0_3.1.1_1.9.4_1.25.3_3.5.3_4.21.1 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always commands: - echo $${NETRC_FILE} > /root/.netrc @@ -90,10 +88,10 @@ steps: - render commands: # we use --ignore-deprecations because we don't want the CI to fail when the API has not been removed yet. 
- - /pluto detect distribution.yml --ignore-deprecations --target-versions=k8s=v1.29.0 + - /pluto detect distribution.yml --ignore-deprecations --target-versions=k8s=v1.30.0 --- -name: e2e-kubernetes-1.29 +name: e2e-kubernetes-1.30 kind: pipeline type: docker @@ -116,13 +114,13 @@ trigger: steps: - name: create Kind cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 pull: always volumes: - name: dockersock path: /var/run/docker.sock environment: - CLUSTER_VERSION: v1.29.0 + CLUSTER_VERSION: v1.30.5 CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER} # /drone/src is the default workdir for the pipeline # using this folder we don't need to mount another @@ -148,8 +146,7 @@ steps: - kind get kubeconfig --name $${CLUSTER_NAME} > $${KUBECONFIG} - name: e2e-kfddistribution - # KUBECTL_KUSTOMIZE_HELM_YQ_ISTIOCTL_FURYCTL_BATS - image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always # we need to use host network to access Kind API port that is listening on the worker's loopback # beacuse we mount the host's Docker socket to run Kind. 
@@ -157,7 +154,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER} KUBECONFIG: /drone/src/kubeconfig - FURYCTL_VERSION: v0.29.7-rc.0 + FURYCTL_VERSION: v0.29.10 # to be changed with v0.30.0-rc.X depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig @@ -175,7 +172,7 @@ steps: - tests/e2e-kfddistribution.sh - name: delete-kind-cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 volumes: - name: dockersock path: /var/run/docker.sock @@ -196,13 +193,13 @@ volumes: host: path: /var/run/docker.sock --- -name: e2e-kubernetes-1.29.0-1.29.1-1.29.2-1.29.3-1.29.4 +name: e2e-kubernetes-1.29.4-to-1.30.0 kind: pipeline type: docker depends_on: - qa - - e2e-kubernetes-1.29 + - e2e-kubernetes-1.30 clone: depth: 1 @@ -220,13 +217,13 @@ trigger: steps: - name: create Kind cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 pull: always volumes: - name: dockersock path: /var/run/docker.sock environment: - CLUSTER_VERSION: v1.29.0 + CLUSTER_VERSION: v1.30.5 CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades # /drone/src is the default workdir for the pipeline # using this folder we don't need to mount another @@ -252,8 +249,7 @@ steps: - kind get kubeconfig --name $${CLUSTER_NAME} > $${KUBECONFIG} - name: e2e-kfddistribution - # KUBECTL_KUSTOMIZE_HELM_YQ_ISTIOCTL_FURYCTL_BATS - image: quay.io/sighup/e2e-testing:1.1.0_0.11.0_3.1.1_1.9.4_1.26.3_3.5.3_4.33.3 + image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always # we need to use host network to access Kind API port that is listening on the worker's loopback # beacuse we mount the host's Docker socket to run Kind. 
@@ -261,7 +257,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades KUBECONFIG: /drone/src/kubeconfig-upgrades - FURYCTL_VERSION: v0.29.7-rc.0 + FURYCTL_VERSION: v0.29.10 # to be changed with v0.30.0-rc.X depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig-upgrades @@ -279,7 +275,7 @@ steps: - tests/e2e-kfddistribution-upgrades.sh - name: delete-kind-cluster - image: quay.io/sighup/dind-kind-kubectl-kustomize:0.20.0_1.29.1_3.10.0 + image: quay.io/sighup/dind-kind-kubectl-kustomize:0.24.0_1.30.5_3.10.0 volumes: - name: dockersock path: /var/run/docker.sock @@ -305,8 +301,8 @@ kind: pipeline type: docker depends_on: - - e2e-kubernetes-1.29 - - e2e-kubernetes-1.29.0-1.29.1-1.29.2-1.29.3-1.29.4 + - e2e-kubernetes-1.30 + - e2e-kubernetes-1.29.4-to-1.30.0 platform: os: linux @@ -320,22 +316,6 @@ trigger: - refs/tags/**-docs* steps: - - name: prepare-release-manifests - image: quay.io/sighup/e2e-testing:1.1.0_0.7.0_3.1.1_1.9.4_1.24.1_3.8.7_4.21.1 - pull: always - depends_on: [clone] - environment: - RELEASE_MANIFESTS_PATH: fury-distribution-${DRONE_TAG}.yml - commands: - - furyctl vendor -H - - kustomize build . 
> $${RELEASE_MANIFESTS_PATH} - when: - ref: - include: - - refs/tags/** - exclude: - - refs/tags/**-docs* - - name: prepare-release-notes image: quay.io/sighup/fury-release-notes-plugin:3.7_2.8.4 depends_on: [clone] @@ -352,14 +332,12 @@ steps: image: plugins/github-release pull: always depends_on: - - prepare-release-manifests - prepare-release-notes settings: api_key: from_secret: github_token file_exists: skip files: - - fury-distribution-${DRONE_TAG}.yml - Furyfile.yaml - kustomization.yaml - kfd.yaml @@ -381,14 +359,12 @@ steps: image: plugins/github-release pull: always depends_on: - - prepare-release-manifests - prepare-release-notes settings: api_key: from_secret: github_token file_exists: skip files: - - fury-distribution-${DRONE_TAG}.yml - Furyfile.yaml - kustomization.yaml - kfd.yaml diff --git a/tests/e2e-kfddistribution-upgrades.sh b/tests/e2e-kfddistribution-upgrades.sh index 93eac8f20..ab13449e8 100755 --- a/tests/e2e-kfddistribution-upgrades.sh +++ b/tests/e2e-kfddistribution-upgrades.sh @@ -6,21 +6,9 @@ set -e echo "----------------------------------------------------------------------------" -echo "Executing furyctl for the initial setup" -/tmp/furyctl apply --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml --outdir "$PWD" --disable-analytics +echo "Executing furyctl for the initial setup 1.29.4" +/tmp/furyctl apply --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml --outdir "$PWD" --disable-analytics echo "----------------------------------------------------------------------------" -echo "Executing upgrade to an intermediate version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml --outdir "$PWD" --force upgrades --disable-analytics - -echo "----------------------------------------------------------------------------" -echo "Executing upgrade to the next version" -/tmp/furyctl apply --upgrade --config 
tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml --outdir "$PWD" --force upgrades --disable-analytics - -echo "----------------------------------------------------------------------------" -echo "Executing upgrade to the next version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml --outdir "$PWD" --force upgrades --disable-analytics - -echo "----------------------------------------------------------------------------" -echo "Executing upgrade to the latest version" -/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics +echo "Executing upgrade to 1.30.0" +/tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml deleted file mode 100644 index 3370d37e9..000000000 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.0.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
- ---- -apiVersion: kfd.sighup.io/v1alpha2 -kind: KFDDistribution -metadata: - name: sighup -spec: - distributionVersion: v1.29.0 - # This section describes how the KFD distribution will be installed - distribution: - kubeconfig: "{env://KUBECONFIG}" - # This common configuration will be applied to all the packages that will be installed in the cluster - common: {} - # This section contains all the configurations for all the KFD core modules - modules: - networking: - type: calico - # This section contains all the configurations for the ingress module - ingress: - baseDomain: fury.sighup.cc - nginx: - type: single - tls: - provider: certManager - certManager: - clusterIssuer: - name: letsencrypt-fury - email: sighup@sighup.cc - type: http01 - logging: - type: loki - minio: - storageSize: 20Gi - rootUser: - username: sighup - password: secretpassword1 - monitoring: - type: prometheus - prometheus: - resources: - requests: - cpu: 10m - limits: - cpu: 2000m - memory: 6Gi - tracing: - type: none - policy: - type: kyverno - kyverno: - additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: Enforce - installDefaultPolicies: true - dr: - type: on-premises - velero: {} - auth: - provider: - type: basicAuth - basicAuth: - username: test - password: testpassword - # patches for kind compatibility and resource setting - customPatches: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: minio-logging - namespace: logging - spec: - template: - spec: - containers: - - name: minio - resources: - requests: - cpu: 10m - memory: 50Mi - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-common - namespace: logging - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-etcd - namespace: logging - - | - $patch: delete - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: 
x509-certificate-exporter-control-plane - namespace: monitoring diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml deleted file mode 100644 index f8e198b63..000000000 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.1.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - ---- -apiVersion: kfd.sighup.io/v1alpha2 -kind: KFDDistribution -metadata: - name: sighup -spec: - distributionVersion: v1.29.1 - # This section describes how the KFD distribution will be installed - distribution: - kubeconfig: "{env://KUBECONFIG}" - # This common configuration will be applied to all the packages that will be installed in the cluster - common: {} - # This section contains all the configurations for all the KFD core modules - modules: - networking: - type: calico - # This section contains all the configurations for the ingress module - ingress: - baseDomain: fury.sighup.cc - nginx: - type: single - tls: - provider: certManager - certManager: - clusterIssuer: - name: letsencrypt-fury - email: sighup@sighup.cc - type: http01 - logging: - type: loki - minio: - storageSize: 20Gi - rootUser: - username: sighup - password: secretpassword1 - monitoring: - type: prometheus - prometheus: - resources: - requests: - cpu: 10m - limits: - cpu: 2000m - memory: 6Gi - tracing: - type: none - policy: - type: kyverno - kyverno: - additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: Enforce - installDefaultPolicies: true - dr: - type: on-premises - velero: {} - auth: - provider: - type: basicAuth - basicAuth: - username: test - password: testpassword - # patches for kind compatibility and resource setting - customPatches: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: StatefulSet - metadata: - 
name: minio-logging - namespace: logging - spec: - template: - spec: - containers: - - name: minio - resources: - requests: - cpu: 10m - memory: 50Mi - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-common - namespace: logging - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-etcd - namespace: logging - - | - $patch: delete - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: x509-certificate-exporter-control-plane - namespace: monitoring diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml deleted file mode 100644 index ce58ffd8e..000000000 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.2.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
- ---- -apiVersion: kfd.sighup.io/v1alpha2 -kind: KFDDistribution -metadata: - name: sighup -spec: - distributionVersion: v1.29.2 - # This section describes how the KFD distribution will be installed - distribution: - kubeconfig: "{env://KUBECONFIG}" - # This common configuration will be applied to all the packages that will be installed in the cluster - common: {} - # This section contains all the configurations for all the KFD core modules - modules: - networking: - type: calico - # This section contains all the configurations for the ingress module - ingress: - baseDomain: fury.sighup.cc - nginx: - type: single - tls: - provider: certManager - certManager: - clusterIssuer: - name: letsencrypt-fury - email: sighup@sighup.cc - type: http01 - logging: - type: loki - minio: - storageSize: 20Gi - rootUser: - username: sighup - password: secretpassword1 - monitoring: - type: prometheus - prometheus: - resources: - requests: - cpu: 10m - limits: - cpu: 2000m - memory: 6Gi - tracing: - type: none - policy: - type: kyverno - kyverno: - additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: Enforce - installDefaultPolicies: true - dr: - type: on-premises - velero: {} - auth: - provider: - type: basicAuth - basicAuth: - username: test - password: testpassword - # patches for kind compatibility and resource setting - customPatches: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: StatefulSet - metadata: - name: minio-logging - namespace: logging - spec: - template: - spec: - containers: - - name: minio - resources: - requests: - cpu: 10m - memory: 50Mi - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-common - namespace: logging - - | - $patch: delete - apiVersion: logging-extensions.banzaicloud.io/v1alpha1 - kind: HostTailer - metadata: - name: systemd-etcd - namespace: logging - - | - $patch: delete - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: 
x509-certificate-exporter-control-plane - namespace: monitoring diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml similarity index 98% rename from tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml rename to tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml index 682a19275..26f848510 100644 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.3.yaml +++ b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.3 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml index a30f86d37..88669a656 100644 --- a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml +++ b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml index 922a40ed2..2dbcd6fd6 100644 --- a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml +++ b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section 
describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml index 775e53d79..f0791b886 100644 --- a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml +++ b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml index 021b2dfbf..06f51a9e6 100644 --- a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml index 3bf20f34b..24f00b281 100644 --- a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml 
b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml index 1a52d03a0..b1a2a80d8 100644 --- a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml index ffe270e96..db1291dee 100644 --- a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml index 0b4d82ec3..41b9cb148 100644 --- a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml index ddd3770f0..879856c46 100644 --- a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml +++ b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml @@ 
-8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml index 2a195558b..4e54e2d62 100644 --- a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml index 9e0f9d0ff..d5b5c0ca8 100644 --- a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml b/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml index 2dcca9681..f6793804f 100644 --- a/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml +++ b/tests/e2e/kfddistribution/furyctl-cleanup-all.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml 
b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml index 34af41579..ae3bfa7a1 100644 --- a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" diff --git a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml index 7bb8cc590..b8cf642ea 100644 --- a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml @@ -8,7 +8,7 @@ kind: KFDDistribution metadata: name: sighup spec: - distributionVersion: v1.29.2 + distributionVersion: v1.30.0 # This section describes how the KFD distribution will be installed distribution: kubeconfig: "{env://KUBECONFIG}" From 3c138a2146ed359fd91ad189a039cb69ee407c0a Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Thu, 21 Nov 2024 12:03:38 +0100 Subject: [PATCH 127/160] feat(e2e): bump furyctl to v0.30.0-rc.0, drop furyctl legacy - bump furyctl to use v0.30.0-rc.0 - fix render step to use furyctl ng instead of legacy - render the whole distro with the render step to check for outdated APIs in use - download `jv` for the schema test step, it was removed from the e2e-testing image. --- .drone.yml | 31 +++++++++++++------ .../kfddistribution/furyctl-init-cluster.yaml | 2 +- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/.drone.yml b/.drone.yml index 49d533b6b..36979c6fb 100644 --- a/.drone.yml +++ b/.drone.yml @@ -65,21 +65,34 @@ steps: - license-check - schema-check commands: + # we need to download `jv` for running the JSON Schemas tests. 
+ - curl -L https://github.com/santhosh-tekuri/jsonschema/releases/download/v6.0.1/jv-v6.0.1-linux-amd64.tar.gz | tar zx --directory /usr/local/bin/ - bats -t tests/schema.sh - name: render image: quay.io/sighup/e2e-testing:1.1.0_1.30.5_3.10.0_4.33.3 pull: always - commands: - - echo $${NETRC_FILE} > /root/.netrc - - furyctl vendor -H - - kustomize build . > distribution.yml - environment: - NETRC_FILE: - from_secret: NETRC_FILE depends_on: - license-check - schema-check + environment: + NETRC_FILE: + from_secret: NETRC_FILE + FURYCTL_VERSION: v0.30.0-rc.0 + FURYCTL_CONFIG: tests/e2e/kfddistribution/furyctl-init-cluster.yaml + FURYCTL_DISTRO_LOCATION: ./ + FURYCTL_OUTDIR: ./ + FURYCTL_DISABLE_ANALYTICS: "true" + KUBECONFIG: ./dummy + commands: + - echo $${NETRC_FILE} > /root/.netrc + - echo "Installing furyctl version $${FURYCTL_VERSION}..." + - curl -L "https://github.com/sighupio/furyctl/releases/download/$${FURYCTL_VERSION}/furyctl-$(uname -s)-amd64.tar.gz" | tar xz -C /usr/local/bin/ + - furyctl download dependencies && furyctl dump template + # Move the folder with the manifests generated from the templates into the right path + - mv distribution $${FURYTCL_OUTDIR}.furyctl/$$(yq .metadata.name $FURYCTL_CONFIG) + # Build the whole distribution + - kustomize build $${FURYTCL_OUTDIR}.furyctl/$$(yq .metadata.name $FURYCTL_CONFIG)/distribution/manifests > distribution.yml - name: check-deprecated-apis image: us-docker.pkg.dev/fairwinds-ops/oss/pluto:v5 @@ -154,7 +167,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER} KUBECONFIG: /drone/src/kubeconfig - FURYCTL_VERSION: v0.29.10 # to be changed with v0.30.0-rc.X + FURYCTL_VERSION: v0.30.0-rc.0 depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig @@ -257,7 +270,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades KUBECONFIG: /drone/src/kubeconfig-upgrades - FURYCTL_VERSION: v0.29.10 # to be changed with v0.30.0-rc.X + 
FURYCTL_VERSION: v0.30.0-rc.0 depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig-upgrades diff --git a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml index ae3bfa7a1..20edf8932 100644 --- a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: From 957fc898e17aedee6248ec19976c518c2a1640fa Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Thu, 21 Nov 2024 15:18:01 +0100 Subject: [PATCH 128/160] chore(e2e): consider new mandatory field in configuration files --- ...l-10-migrate-from-none-to-safe-values.yaml | 11 ++++++----- ...-kyverno-default-policies-to-disabled.yaml | 11 ++++++----- ...-from-alertmanagerconfigs-to-disabled.yaml | 13 +++++++------ .../furyctl-2-migrate-from-tempo-to-none.yaml | 11 ++++++----- ...uryctl-3-migrate-from-kyverno-to-none.yaml | 11 ++++++----- ...furyctl-4-migrate-from-velero-to-none.yaml | 11 ++++++----- .../furyctl-5-migrate-from-loki-to-none.yaml | 10 ++++------ .../furyctl-6-migrate-from-mimir-to-none.yaml | 5 ++--- ...ryctl-7-migrate-from-basicAuth-to-sso.yaml | 19 +++++++++---------- .../furyctl-8-migrate-from-sso-to-none.yaml | 15 +++++++-------- .../furyctl-9-migrate-from-nginx-to-none.yaml | 13 ++++++------- .../kfddistribution/furyctl-init-cluster.yaml | 3 +++ .../furyctl-init-with-values-from-nil.yaml | 13 +++++++------ 13 files changed, 75 insertions(+), 71 deletions(-) diff --git a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml index 88669a656..3696821c0 100644 --- 
a/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml +++ b/tests/e2e/kfddistribution/furyctl-10-migrate-from-none-to-safe-values.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,9 @@ spec: type: http01 logging: type: loki - loki: + loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -41,7 +42,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +58,7 @@ spec: password: secretpassword2 tracing: type: tempo - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,7 +73,7 @@ spec: validationFailureAction: Enforce dr: type: on-premises - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml index 2dbcd6fd6..61f87e7a3 100644 --- a/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml +++ b/tests/e2e/kfddistribution/furyctl-11-migrate-from-kyverno-default-policies-to-disabled.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,9 @@ spec: type: http01 logging: type: loki - loki: + loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -41,7 +42,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +58,7 @@ spec: password: secretpassword2 tracing: type: tempo - tempo: + tempo: backend: minio 
minio: storageSize: 20Gi @@ -72,7 +73,7 @@ spec: validationFailureAction: Enforce dr: type: on-premises - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml index f0791b886..ccf9c5f75 100644 --- a/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml +++ b/tests/e2e/kfddistribution/furyctl-12-migrate-from-alertmanagerconfigs-to-disabled.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,9 @@ spec: type: http01 logging: type: loki - loki: + loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -41,7 +42,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -55,11 +56,11 @@ spec: rootUser: username: sighup password: secretpassword2 - alertmanager: + alertmanager: installDefaultRules: false tracing: type: tempo - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -74,7 +75,7 @@ spec: validationFailureAction: Enforce dr: type: on-premises - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml index 06f51a9e6..fc5b82ac8 100644 --- a/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-2-migrate-from-tempo-to-none.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module 
ingress: @@ -32,8 +32,9 @@ spec: type: http01 logging: type: loki - loki: + loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -41,7 +42,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +58,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,7 +73,7 @@ spec: validationFailureAction: Enforce dr: type: on-premises - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml index 24f00b281..b7af467b7 100644 --- a/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-3-migrate-from-kyverno-to-none.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,9 @@ spec: type: http01 logging: type: loki - loki: + loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -41,7 +42,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +58,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,7 +73,7 @@ spec: validationFailureAction: Enforce dr: type: on-premises - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml index b1a2a80d8..68355ca88 100644 --- a/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml +++ 
b/tests/e2e/kfddistribution/furyctl-4-migrate-from-velero-to-none.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,9 @@ spec: type: http01 logging: type: loki - loki: + loki: backend: minio + tsdbStartDate: "2024-11-21" minio: storageSize: 20Gi rootUser: @@ -41,7 +42,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +58,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,7 +73,7 @@ spec: validationFailureAction: Enforce dr: type: none - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml index db1291dee..e4498dd78 100644 --- a/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-5-migrate-from-loki-to-none.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -41,7 +39,7 @@ spec: password: secretpassword1 monitoring: type: mimir - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +55,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,7 +70,7 @@ spec: validationFailureAction: Enforce dr: type: none - velero: + velero: backend: minio auth: provider: diff --git a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml 
b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml index 41b9cb148..73d273bcc 100644 --- a/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-6-migrate-from-mimir-to-none.yaml @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -82,7 +80,8 @@ spec: password: testpassword # patches for kind compatibility and resource setting customPatches: - patchesStrategicMerge: [] + patchesStrategicMerge: + [] #- | # apiVersion: apps/v1 # kind: StatefulSet diff --git a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml index 879856c46..97103487f 100644 --- a/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml +++ b/tests/e2e/kfddistribution/furyctl-7-migrate-from-basicAuth-to-sso.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -41,7 +39,7 @@ spec: password: secretpassword1 monitoring: type: none - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +55,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,7 +70,7 @@ spec: validationFailureAction: Enforce dr: type: none - velero: + velero: backend: minio auth: provider: @@ -85,8 +83,8 @@ spec: SHARED_SECRET: "LEjtmaKtiCB2qA5rtFSHWiWAzkdFftADf/q2xWT64dg=" SIGNING_KEY: 
"LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU9DMHBBQmx4ZS84bjRQcHBBVUE1QnRxam96Z3dDZVpvRDI2c056TGRiS1hvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFRUpDc253MHlXODRLZXhVSjQ5M21MMG9tNFN5dzJBeGtWOGFpRkxDZFdKaVBYamtUMDE1QwowclJsV2tqNVdlQUhqYmVncmRNL2QyejZTbzY3MWs3TVpRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=" baseDomain: fury.sighup.cc - - dex: + + dex: connectors: - type: ldap id: ldap @@ -112,7 +110,8 @@ spec: nameAttr: cn # patches for kind compatibility and resource setting customPatches: - patchesStrategicMerge: [] + patchesStrategicMerge: + [] #- | # apiVersion: apps/v1 # kind: StatefulSet @@ -180,6 +179,6 @@ spec: # name: x509-certificate-exporter-control-plane # namespace: monitoring plugins: - kustomize: + kustomize: - name: ldap-server folder: ./plugins/ldap-server diff --git a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml index 4e54e2d62..700430afa 100644 --- a/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-8-migrate-from-sso-to-none.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -41,7 +39,7 @@ spec: password: secretpassword1 monitoring: type: none - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +55,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,14 +70,15 @@ spec: validationFailureAction: Enforce dr: type: none - velero: + velero: backend: minio auth: provider: type: none # patches for kind compatibility and resource setting customPatches: - patchesStrategicMerge: [] + patchesStrategicMerge: + [] #- | # 
apiVersion: apps/v1 # kind: StatefulSet @@ -147,6 +146,6 @@ spec: # name: x509-certificate-exporter-control-plane # namespace: monitoring plugins: - kustomize: + kustomize: - name: ldap-server folder: ./plugins/ldap-server diff --git a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml index d5b5c0ca8..21eb481ef 100644 --- a/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml +++ b/tests/e2e/kfddistribution/furyctl-9-migrate-from-nginx-to-none.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,8 +32,6 @@ spec: type: http01 logging: type: none - loki: - backend: minio minio: storageSize: 20Gi rootUser: @@ -41,7 +39,7 @@ spec: password: secretpassword1 monitoring: type: none - mimir: + mimir: backend: minio prometheus: resources: @@ -57,7 +55,7 @@ spec: password: secretpassword2 tracing: type: none - tempo: + tempo: backend: minio minio: storageSize: 20Gi @@ -72,14 +70,15 @@ spec: validationFailureAction: Enforce dr: type: none - velero: + velero: backend: minio auth: provider: type: none # patches for kind compatibility and resource setting customPatches: - patchesStrategicMerge: [] + patchesStrategicMerge: + [] #- | # apiVersion: apps/v1 # kind: StatefulSet diff --git a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml index 20edf8932..d31aced8a 100644 --- a/tests/e2e/kfddistribution/furyctl-init-cluster.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-cluster.yaml @@ -37,6 +37,9 @@ spec: rootUser: username: sighup password: secretpassword1 + loki: + backend: minio + tsdbStartDate: "2024-11-21" monitoring: type: mimir prometheus: diff --git a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml 
b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml index b8cf642ea..d69e98465 100644 --- a/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml +++ b/tests/e2e/kfddistribution/furyctl-init-with-values-from-nil.yaml @@ -16,7 +16,7 @@ spec: common: {} # This section contains all the configurations for all the KFD core modules modules: - networking: + networking: type: calico # This section contains all the configurations for the ingress module ingress: @@ -32,7 +32,8 @@ spec: type: http01 logging: type: loki - loki: + loki: + tsdbStartDate: "2024-11-21" backend: externalEndpoint externalEndpoint: endpoint: 192.168.1.100:9000 @@ -42,7 +43,7 @@ spec: bucketName: loki monitoring: type: mimir - mimir: + mimir: backend: externalEndpoint externalEndpoint: endpoint: 192.168.1.100:9000 @@ -52,7 +53,7 @@ spec: bucketName: mimir tracing: type: tempo - tempo: + tempo: backend: externalEndpoint externalEndpoint: endpoint: 192.168.1.100:9000 @@ -68,7 +69,7 @@ spec: validationFailureAction: Enforce dr: type: on-premises - velero: + velero: backend: externalEndpoint externalEndpoint: endpoint: 192.168.1.100:9000 @@ -116,4 +117,4 @@ spec: kind: DaemonSet metadata: name: x509-certificate-exporter-control-plane - namespace: monitoring \ No newline at end of file + namespace: monitoring From 5b3a557c3d044c86fef855e72014e168605c0c47 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 18:30:22 +0100 Subject: [PATCH 129/160] chore(ci): fix schema tests --- .drone.yml | 4 +++- tests/schema.sh | 30 +++++++++++++++--------------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/.drone.yml b/.drone.yml index 36979c6fb..bf5711f0e 100644 --- a/.drone.yml +++ b/.drone.yml @@ -64,9 +64,11 @@ steps: depends_on: - license-check - schema-check + environment: + JV_VERSION: 6.0.1 commands: # we need to download `jv` for running the JSON Schemas tests. 
- - curl -L https://github.com/santhosh-tekuri/jsonschema/releases/download/v6.0.1/jv-v6.0.1-linux-amd64.tar.gz | tar zx --directory /usr/local/bin/ + - curl -L https://github.com/santhosh-tekuri/jsonschema/releases/download/v$${JV_VERSION}/jv-v$${JV_VERSION}-linux-amd64.tar.gz | tar zx --directory /usr/local/bin/ - bats -t tests/schema.sh - name: render diff --git a/tests/schema.sh b/tests/schema.sh index c1320c506..06f133371 100755 --- a/tests/schema.sh +++ b/tests/schema.sh @@ -29,7 +29,7 @@ test_schema() { yq "tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.yaml" -o json > "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" validate() { - jv "schemas/${KIND}/${APIVER}.json" "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" + jv "schemas/${KIND}/${APIVER}.json" "${TMPDIR}/tests/schemas/${KIND}/${APIVER}/${EXAMPLE}.json" 2>&1 } run validate @@ -63,8 +63,8 @@ test_schema() { expect() { expect_no "${1}" - local EXPECTED_ERROR_1="[S#/\$defs/Spec/else/properties/kubernetes/properties/vpcId/type] expected null, but got string" - local EXPECTED_ERROR_2="[S#/\$defs/Spec/else/properties/kubernetes/properties/subnetIds/type] expected null, but got array" + local EXPECTED_ERROR_1="at '/spec/kubernetes/vpcId': got string, want null" + local EXPECTED_ERROR_2="at '/spec/kubernetes/subnetIds': got array, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -92,7 +92,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec/then/properties/kubernetes/required] missing properties: 'vpcId', 'subnetIds'" + local EXPECTED_ERROR_1="at '/spec/kubernetes': missing properties 'vpcId', 'subnetIds'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -116,8 +116,8 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/0/else/properties/dex/type] expected null, but got object" - local 
EXPECTED_ERROR_2="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/0/else/properties/pomerium/type] expected null, but got object" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/auth/dex': got object, want null" + local EXPECTED_ERROR_2="at '/spec/distribution/modules/auth/pomerium': got object, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -145,7 +145,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Auth/allOf/1/then/properties/provider/required] missing properties: 'basicAuth'" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/auth/provider': missing property 'basicAuth'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -169,7 +169,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution/else/properties/modules/properties/aws/type] expected null, but got object" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/aws': got object, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -193,8 +193,8 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS/then/required] missing properties: 'secret'" - local EXPECTED_ERROR_2="[S#/\$defs/Spec.Distribution/then/properties/modules/required] missing properties: 'aws'" + local EXPECTED_ERROR_1="at '/spec/distribution/modules/ingress/nginx/tls': missing property 'secret'" + local EXPECTED_ERROR_2="at '/spec/distribution/modules': missing property 'aws'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -222,7 +222,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution/then/properties/modules/required] missing properties: 'aws'" + local EXPECTED_ERROR_1="at '/spec/distribution/modules': missing property 'aws'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -246,7 +246,7 @@ test_schema() { expect() { 
expect_no - local EXPECTED_ERROR_1="[S#/\$defs/Spec.Distribution.CustomPatches.Patch/oneOf] valid against schemas at indexes 0 and 1" + local EXPECTED_ERROR_1="at '/spec/distribution/customPatches/patches/0': oneOf failed, subschemas 0, 1 matched" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -270,7 +270,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="additionalProperties 'type' not allowed" + local EXPECTED_ERROR_1="at '/spec/distribution/customPatches/configMapGenerator/0': additional properties 'type' not allowed" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -294,7 +294,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="expected null, but got string" + local EXPECTED_ERROR_1="at '/spec/infrastructure/vpn/vpcId': got string, want null" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 @@ -318,7 +318,7 @@ test_schema() { expect() { expect_no - local EXPECTED_ERROR_1="missing properties: 'vpcId'" + local EXPECTED_ERROR_1=" at '/spec/infrastructure/vpn': missing property 'vpcId'" if [[ "${output}" != *"${EXPECTED_ERROR_1}"* ]]; then return 2 From 27a091c7844ef68f28a8d260c9dff4fc0f41cfdf Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 18:37:16 +0100 Subject: [PATCH 130/160] fix(templates): disable netpol for all providers by default --- defaults/ekscluster-kfd-v1alpha2.yaml | 1 + defaults/kfddistribution-kfd-v1alpha2.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 41e37df57..69f128f00 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -15,6 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: eks + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration diff --git 
a/defaults/kfddistribution-kfd-v1alpha2.yaml b/defaults/kfddistribution-kfd-v1alpha2.yaml index 08a10f651..c943ae1df 100644 --- a/defaults/kfddistribution-kfd-v1alpha2.yaml +++ b/defaults/kfddistribution-kfd-v1alpha2.yaml @@ -15,6 +15,7 @@ data: relativeVendorPath: "../../vendor" provider: type: none + networkPoliciesEnabled: false # the module section will be used to fine tune each module behaviour and configuration modules: # ingress module configuration From 19ac1f2f8a4f9b6790074af53847da66e3206cda Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Mon, 25 Nov 2024 18:43:38 +0100 Subject: [PATCH 131/160] chore(ci): fix kind/node version --- .drone.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index bf5711f0e..fefcaa3cb 100644 --- a/.drone.yml +++ b/.drone.yml @@ -135,7 +135,7 @@ steps: - name: dockersock path: /var/run/docker.sock environment: - CLUSTER_VERSION: v1.30.5 + CLUSTER_VERSION: v1.30.6 CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER} # /drone/src is the default workdir for the pipeline # using this folder we don't need to mount another @@ -238,7 +238,7 @@ steps: - name: dockersock path: /var/run/docker.sock environment: - CLUSTER_VERSION: v1.30.5 + CLUSTER_VERSION: v1.30.6 CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades # /drone/src is the default workdir for the pipeline # using this folder we don't need to mount another From 7c95fb4991583e078466f5071bfb9f8dc3ad399b Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Tue, 26 Nov 2024 18:00:27 +0100 Subject: [PATCH 132/160] chore(ci): bump furyctl rc --- .drone.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.drone.yml b/.drone.yml index fefcaa3cb..2ceaab572 100644 --- a/.drone.yml +++ b/.drone.yml @@ -80,7 +80,7 @@ steps: environment: NETRC_FILE: from_secret: NETRC_FILE - FURYCTL_VERSION: v0.30.0-rc.0 + FURYCTL_VERSION: v0.30.0-rc.1 FURYCTL_CONFIG: 
tests/e2e/kfddistribution/furyctl-init-cluster.yaml FURYCTL_DISTRO_LOCATION: ./ FURYCTL_OUTDIR: ./ @@ -169,7 +169,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER} KUBECONFIG: /drone/src/kubeconfig - FURYCTL_VERSION: v0.30.0-rc.0 + FURYCTL_VERSION: v0.30.0-rc.1 depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig @@ -272,7 +272,7 @@ steps: environment: CLUSTER_NAME: ${DRONE_REPO_NAME}-${DRONE_BUILD_NUMBER}-upgrades KUBECONFIG: /drone/src/kubeconfig-upgrades - FURYCTL_VERSION: v0.30.0-rc.0 + FURYCTL_VERSION: v0.30.0-rc.1 depends_on: [create Kind cluster] commands: - export KUBECONFIG=/drone/src/kubeconfig-upgrades From 3c8d733e74707ed21a6c0b2cb9ea2c60a0c11558 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 10:25:10 +0100 Subject: [PATCH 133/160] feat: bump monitoring to v3.3.0-rc.3, change secret template for minio-monitoring username and password --- kfd.yaml | 2 +- .../manifests/monitoring/patches/minio.root.env.tpl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index 41dd9ce23..63859b7b9 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -9,7 +9,7 @@ modules: dr: v3.0.0-rc.1 ingress: v3.0.1 logging: v4.0.0-rc.1 - monitoring: v3.3.0-rc.2 + monitoring: v3.3.0-rc.3 opa: v1.13.0 networking: v2.0.0-rc.2 tracing: v1.1.0 diff --git a/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl b/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl index 15de617fa..0dec09ed2 100644 --- a/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl +++ b/templates/distribution/manifests/monitoring/patches/minio.root.env.tpl @@ -1,2 +1,2 @@ -ROOT_PASSWORD={{ .spec.distribution.modules.monitoring.minio.rootUser.password }} -ROOT_USER={{ .spec.distribution.modules.monitoring.minio.rootUser.username }} +rootPassword={{ .spec.distribution.modules.monitoring.minio.rootUser.password }} +rootUser={{ 
.spec.distribution.modules.monitoring.minio.rootUser.username }} From 5f4789934ba9db4598ee8f1949f3611975168524 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 10:32:31 +0100 Subject: [PATCH 134/160] fix: bump monitoring rc, wrong secret name was used on the deployment --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 63859b7b9..c01d89a74 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -9,7 +9,7 @@ modules: dr: v3.0.0-rc.1 ingress: v3.0.1 logging: v4.0.0-rc.1 - monitoring: v3.3.0-rc.3 + monitoring: v3.3.0-rc.4 opa: v1.13.0 networking: v2.0.0-rc.2 tracing: v1.1.0 From 39d3b52a97180e1c6cd289ec15f0c21500fe95bd Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 10:37:27 +0100 Subject: [PATCH 135/160] feat: bump logging to v4.0.0-rc.2, change secret template for minio-logging --- kfd.yaml | 2 +- .../distribution/manifests/logging/patches/minio.root.env.tpl | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index c01d89a74..dd0865104 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -8,7 +8,7 @@ modules: aws: v4.3.0 dr: v3.0.0-rc.1 ingress: v3.0.1 - logging: v4.0.0-rc.1 + logging: v4.0.0-rc.2 monitoring: v3.3.0-rc.4 opa: v1.13.0 networking: v2.0.0-rc.2 diff --git a/templates/distribution/manifests/logging/patches/minio.root.env.tpl b/templates/distribution/manifests/logging/patches/minio.root.env.tpl index e1ed7291c..0458f94b2 100644 --- a/templates/distribution/manifests/logging/patches/minio.root.env.tpl +++ b/templates/distribution/manifests/logging/patches/minio.root.env.tpl @@ -1,2 +1,2 @@ -ROOT_PASSWORD={{ .spec.distribution.modules.logging.minio.rootUser.password }} -ROOT_USER={{ .spec.distribution.modules.logging.minio.rootUser.username }} +rootPassword={{ .spec.distribution.modules.logging.minio.rootUser.password }} +rootUser={{ .spec.distribution.modules.logging.minio.rootUser.username }} From 8263a3672d3532f43785dc769242be29197c451a Mon Sep 17 
00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 10:39:49 +0100 Subject: [PATCH 136/160] feat(templates): add parameter to nodeSelector and tolerations helpers - Add a `returnEmptyInsteadOfNull` parameter to the `nodeSelector` and `tolerations` helper templates. Setting this to true will return and empty object `{}` instead of `null`. --- templates/distribution/_helpers.tpl | 8 ++++++++ .../ingress/resources/cert-manager-clusterissuer.yml.tpl | 5 +++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/templates/distribution/_helpers.tpl b/templates/distribution/_helpers.tpl index 597415e52..70c77ba6c 100644 --- a/templates/distribution/_helpers.tpl +++ b/templates/distribution/_helpers.tpl @@ -42,7 +42,11 @@ $moduleNodeSelector (index .spec.distribution.common "nodeSelector") -}} + {{- if and (not $nodeSelector) (index . "returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}} + {{- "{}" | indent $indent | trim -}} + {{- else -}} {{- $nodeSelector | toYaml | indent $indent | trim -}} + {{- end -}} {{- end -}} {{- define "tolerations" -}} @@ -70,7 +74,11 @@ $moduleTolerations (index .spec.distribution.common "tolerations") -}} + {{- if and (not $tolerations) (index . 
"returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}} + {{- "{}" | indent $indent | trim -}} + {{- else -}} {{- $tolerations | toYaml | indent $indent | trim -}} + {{- end -}} {{- end -}} {{ define "globalIngressClass" }} diff --git a/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl b/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl index c2ca78d71..357bd2e2b 100644 --- a/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl +++ b/templates/distribution/manifests/ingress/resources/cert-manager-clusterissuer.yml.tpl @@ -36,9 +36,10 @@ spec: app: cert-manager spec: nodeSelector: - {{ template "nodeSelector" $certManagerArgs }} + {{- /* NOTE!: merge order is important below */}} + {{ template "nodeSelector" ( merge (dict "returnEmptyInsteadOfNull" true) $certManagerArgs ) }} tolerations: - {{ template "tolerations" ( merge (dict "indent" 16) $certManagerArgs ) }} + {{ template "tolerations" ( merge (dict "indent" 16 "returnEmptyInsteadOfNull" true) $certManagerArgs ) }} {{- end -}} {{- else if .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers }} solvers: From b83092a7ffdeb7c27ce6faa133588a6d6b415e57 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 10:49:54 +0100 Subject: [PATCH 137/160] fix(templates): tolerations should return list instead of object --- templates/distribution/_helpers.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/distribution/_helpers.tpl b/templates/distribution/_helpers.tpl index 70c77ba6c..6f5e2a26a 100644 --- a/templates/distribution/_helpers.tpl +++ b/templates/distribution/_helpers.tpl @@ -75,7 +75,7 @@ (index .spec.distribution.common "tolerations") -}} {{- if and (not $tolerations) (index . 
"returnEmptyInsteadOfNull") .returnEmptyInsteadOfNull -}} - {{- "{}" | indent $indent | trim -}} + {{- "[]" | indent $indent | trim -}} {{- else -}} {{- $tolerations | toYaml | indent $indent | trim -}} {{- end -}} From 322cd51fa96aea4fd9767f9cef27b4d5801631e2 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 11:51:46 +0100 Subject: [PATCH 138/160] fix: missing required parameter from EKSCluster schema (nodePoolGlobalAmiType) --- docs/schemas/ekscluster-kfd-v1alpha2.md | 2 +- pkg/apis/ekscluster/v1alpha2/private/schema.go | 5 ++++- pkg/apis/ekscluster/v1alpha2/public/schema.go | 5 ++++- schemas/private/ekscluster-kfd-v1alpha2.json | 3 ++- schemas/public/ekscluster-kfd-v1alpha2.json | 3 ++- 5 files changed, 13 insertions(+), 5 deletions(-) diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index a165f8cf1..e4a0eed68 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -5022,7 +5022,7 @@ The network CIDR that will be used to assign IP addresses to the VPN clients whe | [logRetentionDays](#speckuberneteslogretentiondays) | `integer` | Optional | | [logsTypes](#speckuberneteslogstypes) | `array` | Optional | | [nodeAllowedSshPublicKey](#speckubernetesnodeallowedsshpublickey) | `object` | Required | -| [nodePoolGlobalAmiType](#speckubernetesnodepoolglobalamitype) | `string` | Optional | +| [nodePoolGlobalAmiType](#speckubernetesnodepoolglobalamitype) | `string` | Required | | [nodePools](#speckubernetesnodepools) | `array` | Required | | [nodePoolsLaunchKind](#speckubernetesnodepoolslaunchkind) | `string` | Required | | [serviceIpV4Cidr](#speckubernetesserviceipv4cidr) | `string` | Optional | diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index c99816b68..a96dea2a0 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -3985,7 +3985,7 @@ 
type SpecKubernetes struct { // Global default AMI type used for EKS worker nodes. This will apply to all node // pools unless overridden by a specific node pool. - NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` + NodePoolGlobalAmiType SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType" yaml:"nodePoolGlobalAmiType" mapstructure:"nodePoolGlobalAmiType"` // NodePools corresponds to the JSON schema field "nodePools". NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` @@ -4025,6 +4025,9 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } + if v, ok := raw["nodePoolGlobalAmiType"]; !ok || v == nil { + return fmt.Errorf("field nodePoolGlobalAmiType in SpecKubernetes: required") + } if v, ok := raw["nodePools"]; !ok || v == nil { return fmt.Errorf("field nodePools in SpecKubernetes: required") } diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index ba2f64980..f63a2cc65 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -1578,7 +1578,7 @@ type SpecKubernetes struct { // Global default AMI type used for EKS worker nodes. This will apply to all node // pools unless overridden by a specific node pool. - NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` + NodePoolGlobalAmiType SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType" yaml:"nodePoolGlobalAmiType" mapstructure:"nodePoolGlobalAmiType"` // NodePools corresponds to the JSON schema field "nodePools". 
NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` @@ -3189,6 +3189,9 @@ func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } + if v, ok := raw["nodePoolGlobalAmiType"]; !ok || v == nil { + return fmt.Errorf("field nodePoolGlobalAmiType in SpecKubernetes: required") + } if v, ok := raw["nodePools"]; !ok || v == nil { return fmt.Errorf("field nodePools in SpecKubernetes: required") } diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index c8e05bcc9..0b82f017a 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -546,7 +546,8 @@ "apiServer", "nodeAllowedSshPublicKey", "nodePools", - "nodePoolsLaunchKind" + "nodePoolsLaunchKind", + "nodePoolGlobalAmiType" ] }, "Spec.Kubernetes.APIServer": { diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index e9534708c..d37497bc4 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -546,7 +546,8 @@ "apiServer", "nodeAllowedSshPublicKey", "nodePools", - "nodePoolsLaunchKind" + "nodePoolsLaunchKind", + "nodePoolGlobalAmiType" ] }, "Spec.Kubernetes.APIServer": { From 04821210079529bd543649c47172b8dde401298c Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 12:00:21 +0100 Subject: [PATCH 139/160] fix: fix ekscluster schema tests --- tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml | 1 
+ tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml | 1 + tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml | 1 + 
tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml | 2 +- tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml | 1 + tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml | 1 + 48 files changed, 48 insertions(+), 1 deletion(-) diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml index d5986d7c1..562c66dc4 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-no.yaml @@ -52,6 +52,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml index 4f18f6f91..6242d3fc3 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/001-ok.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml index 5888b36c0..4ae4d5a2b 100644 --- 
a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-no.yaml @@ -19,6 +19,7 @@ spec: kubernetes: nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml index 5a3c32f26..a441afdec 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/002-ok.yaml @@ -29,6 +29,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml index e2e9d56a2..d1421afba 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-no.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml index 249c25a09..5629057b5 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/003-ok.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml 
b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml index 420b4840d..aa75e4853 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml index 580764001..4f0950e57 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/004-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml index ccb5cb6d6..f770659ef 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml index 34d2dd0af..afdfeb7d1 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/005-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: 
ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml index 885d1e3a0..f1b942728 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-no.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml index b2365c265..7d0d4e962 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/006-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml index efdb5c4dc..ff0ed51dd 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml index 853a45c9b..3c098542f 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/007-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io 
nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml index 93a4776c6..d01de7030 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml index f4069e810..9cbd6ec62 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -44,6 +44,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml index 51484c2b1..f857d991c 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml index f4069e810..9cbd6ec62 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/009-ok.yaml @@ -44,6 +44,7 @@ spec: publicAccess: 
false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml index 621ac5be6..1b35fe071 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml index 59cbf0a0c..192ec94da 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/010-ok.yaml @@ -44,6 +44,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml index aaab0aa55..724b2849c 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-no.yaml @@ -37,6 +37,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml index 17284c184..ebc3c17c8 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml +++ 
b/tests/schemas/private/ekscluster-kfd-v1alpha2/011-ok.yaml @@ -38,6 +38,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml index 65dcdeba9..8aab91a86 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-no.yaml @@ -37,6 +37,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml index 7a4dc98c0..f5787979d 100644 --- a/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml +++ b/tests/schemas/private/ekscluster-kfd-v1alpha2/012-ok.yaml @@ -38,6 +38,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml index 763067fbf..5593b9eea 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-no.yaml @@ -52,6 +52,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml 
index fd25653f5..6dc006a1e 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/001-ok.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml index 23ba3cb2b..87c241192 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-no.yaml @@ -19,6 +19,7 @@ spec: kubernetes: nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml index 4ee0dbdb1..d0488d417 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/002-ok.yaml @@ -29,6 +29,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml index 1fb2e5ae9..6571a4e9e 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-no.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git 
a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml index 6d4cdc170..3676cf950 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/003-ok.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml index 356843236..c9015eed3 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml index 502d91aa9..f532e7dd4 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/004-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml index 67e02a452..b1cb81933 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: 
"alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml index 1e74a70aa..b17f26041 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/005-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml index cec6acfe2..59ed50619 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-no.yaml @@ -46,6 +46,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml index 327d7f79c..aadc3b4ba 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/006-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml index efdb5c4dc..ff0ed51dd 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething 
engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml index 3183a77cd..7c7798b76 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/007-ok.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml index 80d99e13a..5b5bb45f6 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml index d4dfd6ae2..65e71550c 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -43,7 +43,7 @@ spec: publicAccessCidrs: [] publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io - nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml index 0985fe398..7bc5e19aa 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-no.yaml @@ -45,6 
+45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml index 20cb35b93..242de8a65 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/009-ok.yaml @@ -44,6 +44,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml index cbae48b16..29cf21dbc 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-no.yaml @@ -45,6 +45,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml index b203f3327..ac7701cfe 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/010-ok.yaml @@ -44,6 +44,7 @@ spec: publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml index aaab0aa55..724b2849c 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml +++ 
b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-no.yaml @@ -37,6 +37,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml index 75f0c9e0e..0c0d66952 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/011-ok.yaml @@ -38,6 +38,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml index 65dcdeba9..8aab91a86 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-no.yaml @@ -37,6 +37,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml index 828309885..5b6353984 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/012-ok.yaml @@ -37,6 +37,7 @@ spec: - subnet-0123456789abcdef2 nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io nodePoolsLaunchKind: both + nodePoolGlobalAmiType: "alinux2" nodePools: - ami: id: ami-01234567890123456 From b3f81aa230935183d63bdd724630cebeb1df1a48 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 12:03:15 +0100 
Subject: [PATCH 140/160] fix: ekscluster public 008-ok test missing parameter --- tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml index 65e71550c..4376557a4 100644 --- a/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml +++ b/tests/schemas/public/ekscluster-kfd-v1alpha2/008-ok.yaml @@ -43,6 +43,7 @@ spec: publicAccessCidrs: [] publicAccess: false nodeAllowedSshPublicKey: ssh-ed25519 SomethingSomething engineering@sighup.io + nodePoolsLaunchKind: both nodePoolGlobalAmiType: "alinux2" nodePools: - ami: From bad8d38ac5849f56f9bcd61dc3ca4d85d93879b4 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 12:11:52 +0100 Subject: [PATCH 141/160] chore(e2e): fix manifests for 1.29 and 1.30 --- tests/e2e-kfddistribution-upgrades.sh | 3 +++ .../kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml | 2 +- .../kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/e2e-kfddistribution-upgrades.sh b/tests/e2e-kfddistribution-upgrades.sh index ab13449e8..543772664 100755 --- a/tests/e2e-kfddistribution-upgrades.sh +++ b/tests/e2e-kfddistribution-upgrades.sh @@ -11,4 +11,7 @@ echo "Executing furyctl for the initial setup 1.29.4" echo "----------------------------------------------------------------------------" echo "Executing upgrade to 1.30.0" +# we set the switch date for Loki to "tomorrow". Notice that `-d flag` does not work on Darwin, you need to use `-v +1d` instead. 
+# this is needed only when upgrading from 1.29.4 to 1.30.0 (and equivalent versions) +yq -i ".spec.distribution.modules.logging.loki.tsdbStartDate=\"$(date -I -d '+1 day')\"" tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml /tmp/furyctl apply --upgrade --config tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml --outdir "$PWD" --distro-location ./ --force upgrades --disable-analytics diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml index d261e88cf..43ef4e72d 100644 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml +++ b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.29.4.yaml @@ -52,7 +52,7 @@ spec: type: kyverno kyverno: additionalExcludedNamespaces: ["local-path-storage"] - validationFailureAction: Enforce + validationFailureAction: enforce installDefaultPolicies: true dr: type: on-premises diff --git a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml index 26f848510..c9a4de25b 100644 --- a/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml +++ b/tests/e2e/kfddistribution-upgrades/furyctl-init-cluster-1.30.0.yaml @@ -32,6 +32,8 @@ spec: type: http01 logging: type: loki + loki: + tsdbStartDate: "2024-11-28" # this should be a day in the future when upgrading minio: storageSize: 20Gi rootUser: From ac11f7688ac0f9b61b843c4d7392c9c322a1a92a Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 12:21:29 +0100 Subject: [PATCH 142/160] feat: bump logging to v4.0.0-rc.3 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index dd0865104..35a12cd4d 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -8,7 +8,7 @@ modules: aws: v4.3.0 dr: v3.0.0-rc.1 ingress: v3.0.1 - logging: v4.0.0-rc.2 + logging: v4.0.0-rc.3 monitoring: v3.3.0-rc.4 opa: v1.13.0 
networking: v2.0.0-rc.2 From 93671a9c0668391e520084fdc11b52cfd6c67543 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 12:26:27 +0100 Subject: [PATCH 143/160] chore: change phrasing on job deletion --- templates/distribution/scripts/apply.sh.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/distribution/scripts/apply.sh.tpl b/templates/distribution/scripts/apply.sh.tpl index 33f57dd52..a8d0c883f 100644 --- a/templates/distribution/scripts/apply.sh.tpl +++ b/templates/distribution/scripts/apply.sh.tpl @@ -43,7 +43,7 @@ $kubectlbin create namespace calico-system --dry-run=client -o yaml | $kubectlbi < out.yaml $yqbin 'select(.kind == "CustomResourceDefinition")' | $kubectlbin apply -f - --server-side < out.yaml $yqbin 'select(.kind == "CustomResourceDefinition")' | $kubectlbin wait --for condition=established --timeout=60s -f - -echo "Clean up init jobs, since they cannot be changed without conficts and they are idempotent by nature..." +echo "Clean up old init jobs..." 
$kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-setup -n kube-system $kubectlbin delete --ignore-not-found --wait --timeout=180s job minio-logging-buckets-setup -n logging From 12a3ebd5c27f49f8fbab4e22c70ed7eaf0fadba9 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 14:55:28 +0100 Subject: [PATCH 144/160] feat: add default snapshotclass for EKSCluster provider --- .../distribution/manifests/aws/kustomization.yaml.tpl | 3 ++- .../manifests/aws/resources/snapshotclasses.yml | 8 ++++++++ .../aws/resources/{sc.yml => storageclasses.yml} | 0 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 templates/distribution/manifests/aws/resources/snapshotclasses.yml rename templates/distribution/manifests/aws/resources/{sc.yml => storageclasses.yml} (100%) diff --git a/templates/distribution/manifests/aws/kustomization.yaml.tpl b/templates/distribution/manifests/aws/kustomization.yaml.tpl index 5fc017d20..dcfc507b0 100644 --- a/templates/distribution/manifests/aws/kustomization.yaml.tpl +++ b/templates/distribution/manifests/aws/kustomization.yaml.tpl @@ -13,7 +13,8 @@ resources: - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/snapshot-controller" }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/load-balancer-controller" }} - {{ print "../" .spec.distribution.common.relativeVendorPath "/modules/aws/katalog/node-termination-handler" }} - - resources/sc.yml + - resources/storageclasses.yml + - resources/snapshotclasses.yml patchesStrategicMerge: - patches/cluster-autoscaler.yml diff --git a/templates/distribution/manifests/aws/resources/snapshotclasses.yml b/templates/distribution/manifests/aws/resources/snapshotclasses.yml new file mode 100644 index 000000000..e75210305 --- /dev/null +++ b/templates/distribution/manifests/aws/resources/snapshotclasses.yml @@ -0,0 +1,8 @@ +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: 
+ name: ebs-sc + labels: + velero.io/csi-volumesnapshot-class: "true" +driver: ebs.csi.aws.com +deletionPolicy: Retain \ No newline at end of file diff --git a/templates/distribution/manifests/aws/resources/sc.yml b/templates/distribution/manifests/aws/resources/storageclasses.yml similarity index 100% rename from templates/distribution/manifests/aws/resources/sc.yml rename to templates/distribution/manifests/aws/resources/storageclasses.yml From 03784a156ae77cf5de59d1ca344cb47bf1d017a7 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 14:56:47 +0100 Subject: [PATCH 145/160] docs: add EKSCluster volumesnapshotclass note on release docs --- docs/releases/v1.30.0.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index 8016520d6..bd42de525 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -94,6 +94,7 @@ General example to enable Volume Snapshotting on rook-ceph (from our storage add deletionPolicy: Retain ``` `deletionPolicy: Retain` is important because if the volume snapshot is deleted from the namespace, the cluster wide volumesnapshotcontent CR will be preserved, maintaining the snapshot on the storage that the cluster is using. +**NOTE**: For EKSCluster provider, a default VolumeSnapshotClass is created automatically. - **DR optional snapshot-controller installation**: To leverage VolumeSnapshots on the OnPremises and KFDDistribution providers, a new option on velero has been added to install the snapshot-controller component. Before activating this parameter make sure that in your cluster there is not another snapshot-controller component deployed. By default this parameter is `false`. 
```yaml From ada5ec2b9079753e5f496160fb4fb9c1fe2a4660 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 15:24:23 +0100 Subject: [PATCH 146/160] docs: added all versions informations on modules and installers --- docs/releases/v1.30.0.md | 50 +++++++++++++++++++++++++++++++--------- 1 file changed, 39 insertions(+), 11 deletions(-) diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index bd42de525..b80123fcd 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -9,28 +9,56 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i ### Installer Updates - [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) ๐Ÿ“ฆ installer: [**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6) - - TBD + - Updated etcd default version to 3.5.15 + - Updated HAProxy version to 3.0 + - Updated containerd default version to 1.7.23 + - Added support for Kubernetes versions 1.30.6, 1.29.10 and 1.28.15 - [eks](https://github.com/sighupio/fury-eks-installer) ๐Ÿ“ฆ installer: [**v3.2.0**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.2.0) - - TBD + - Introduced AMI selection type: `alinux2023` and `alinux2` + - Fixed eks-managed nodepool node labels ### Module updates - [networking](https://github.com/sighupio/fury-kubernetes-networking) ๐Ÿ“ฆ core module: [**v2.0.0**](https://github.com/sighupio/fury-kubernetes-networking/releases/tag/v2.0.0) - - TBD + - Updated Tigera operator to v1.36.1 (that includes calico v3.29.0) + - Updated Cilium to v1.16.3 - [monitoring](https://github.com/sighupio/fury-kubernetes-monitoring) ๐Ÿ“ฆ core module: [**v3.3.0**](https://github.com/sighupio/fury-kubernetes-monitoring/releases/tag/v3.3.0) - - TBD + - Updated blackbox-exporter to v0.25.0 + - Updated grafana to v11.3.0 + - Updated kube-rbac-proxy to v0.18.1 + - Updated kube-state-metrics to v2.13.0 + - Updated node-exporter to v1.8.2 + - Updated prometheus-adapter 
to v0.12.0 + - Updated prometheus-operator to v0.76.2 + - Updated prometheus to v2.54.1 + - Updated x509-exporter to v3.17.0 + - Updated mimir to v2.14.0 + - Updated minio to version RELEASE.2024-10-13T13-34-11Z - [logging](https://github.com/sighupio/fury-kubernetes-logging) ๐Ÿ“ฆ core module: [**v4.0.0**](https://github.com/sighupio/fury-kubernetes-logging/releases/tag/v4.0.0) - - TBD + - Updated opensearch and opensearch-dashboards to v2.17.1 + - Updated logging-operator to v4.10.0 + - Updated loki to v2.9.10 + - Updated minio to version RELEASE.2024-10-13T13-34-11Z - [ingress](https://github.com/sighupio/fury-kubernetes-ingress) ๐Ÿ“ฆ core module: [**v3.0.1**](https://github.com/sighupio/fury-kubernetes-ingress/releases/tag/v3.0.1) - - TBD -- [auth](https://github.com/sighupio/fury-kubernetes-auth) ๐Ÿ“ฆ core module: [**v0.X.0**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.X.0) - - TBD + - Updated cert-manager to v1.16.1 + - Updated external-dns to v0.15.0 + - Updated forecastle to v1.0.145 + - Updated nginx to v1.11.3 +- [auth](https://github.com/sighupio/fury-kubernetes-auth) ๐Ÿ“ฆ core module: [**v0.4.0**](https://github.com/sighupio/fury-kubernetes-auth/releases/tag/v0.4.0) + - Updated dex to v2.41.1 + - Updated pomerium to v0.27.1 - [dr](https://github.com/sighupio/fury-kubernetes-dr) ๐Ÿ“ฆ core module: [**v3.0.0**](https://github.com/sighupio/fury-kubernetes-dr/releases/tag/v3.0.0) - - TBD + - Updated velero to v1.15.0 + - Updated all velero plugins to v1.11.0 + - Added snapshot-controller v8.0.1 - [tracing](https://github.com/sighupio/fury-kubernetes-tracing) ๐Ÿ“ฆ core module: [**v1.1.0**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/v1.1.0) - - TBD + - Updated tempo to v2.6.0 + - Updated minio to version RELEASE.2024-10-13T13-34-11Z - [aws](https://github.com/sighupio/fury-kubernetes-aws) ๐Ÿ“ฆ module: [**v4.3.0**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/v4.3.0) - - TBD + - Updated 
cluster-autoscaler to v1.30.0 + - Updated snapshot-controller to v8.1.0 + - Updated aws-load-balancer-controller to v2.10.0 + - Updated node-termination-handler to v1.22.0 ## New features ๐ŸŒŸ From cf60ba18e526073c75e6e4541a8bb9a2b9597615 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 15:28:36 +0100 Subject: [PATCH 147/160] docs: updated roadmap with achieved goals and missed goals --- ROADMAP.md | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index 54ef986bc..faa51cf64 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -6,21 +6,27 @@ From 2024, development will focus on hardening the distribution security-wise, p ## Q1 2024 -- KFD 1.28.x release and release of the latest supported patch version for 1.27.x and 1.26.x, drop support for 1.25.x -- furyctl 0.28.x release -- Feature: Additional encryption parameters for ETCD on the OnPremises provider -- New project release: Gangplank, a forked and updated version of Gangway +- [x] KFD 1.28.x release and release of the latest supported patch version for 1.27.x and 1.26.x, drop support for 1.25.x +- [x] furyctl 0.28.x release +- [x] Feature: Additional encryption parameters for ETCD on the OnPremises provider +- [x] New project release: Gangplank, a forked and updated version of Gangway ## Q2 2024 -- KFD 1.29.x release and release of the latest supported patch version for 1.28.x and 1.27.x, drop support for 1.26.x -- furyctl 0.29.x release -- Feature: Improved hardening for all the images used in the KFD distribution by default -- Feature: Improved network policies for the KFD infrastructural components +- [x] KFD 1.29.x release and release of the latest supported patch version for 1.28.x and 1.27.x, drop support for 1.26.x +- [x] furyctl 0.29.x release +- [ ] Feature: Improved hardening for all the images used in the KFD distribution by default +- [ ] Feature: Improved network policies for the KFD infrastructural components ## H2 2024 
-- KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x -- furyctl 0.30.x release +- [x] KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x +- [x] furyctl 0.30.x release - Feature: Add support for secured container runtimes - Feature: Track dependencies provenance and dependencies signing +- [x] (from Q2 2024) Feature: Optional selection of improved hardened images used in the KFD distribution installation +- [x] (from Q2 2024) Feature: Experimental network policies for the KFD infrastructural components on the OnPremises provider +- [ ] KFD 1.31.x release +- [ ] furyctl 0.31.x release + + From aefafddb3dd32786fc65347b61ac3c06443527cf Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 15:29:34 +0100 Subject: [PATCH 148/160] chore: add empty checkboxes --- ROADMAP.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index faa51cf64..f5e0fb0a1 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -22,8 +22,8 @@ From 2024, development will focus on hardening the distribution security-wise, p - [x] KFD 1.30.x release and release of the latest supported patch version for 1.29.x and 1.28.x, drop support for 1.27.x - [x] furyctl 0.30.x release -- Feature: Add support for secured container runtimes -- Feature: Track dependencies provenance and dependencies signing +- [ ] Feature: Add support for secured container runtimes +- [ ] Feature: Track dependencies provenance and dependencies signing - [x] (from Q2 2024) Feature: Optional selection of improved hardened images used in the KFD distribution installation - [x] (from Q2 2024) Feature: Experimental network policies for the KFD infrastructural components on the OnPremises provider - [ ] KFD 1.31.x release From 291188517dd5988e38461a9c9f05ac4e9ab83f68 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 16:49:45 +0100 Subject: 
[PATCH 149/160] docs: v1.30.0 release notes --- docs/releases/v1.30.0.md | 125 ++++++++++++++++++++++++++------------- 1 file changed, 84 insertions(+), 41 deletions(-) diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index b80123fcd..72d800ed2 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -1,8 +1,8 @@ # Kubernetes Fury Distribution Release v1.30.0 -Welcome to KFD release `v1.30.0`. +Welcome to KFD release `v1.30.0`. This is the first release of KFD supporting Kubernetes 1.30. -The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.io/) it is battle tested in production environments. +The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.io/). ## New Features since `v1.29.4` @@ -10,7 +10,7 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i - [on-premises](https://github.com/sighupio/fury-kubernetes-on-premises) ๐Ÿ“ฆ installer: [**v1.30.6**](https://github.com/sighupio/fury-kubernetes-on-premises/releases/tag/v1.30.6) - Updated etcd default version to 3.5.15 - - Updated HAProxy version to 3.0 + - Updated HAProxy version to 3.0 TLS - Updated containerd default version to 1.7.23 - Added support for Kubernetes versions 1.30.6, 1.29.10 and 1.28.15 - [eks](https://github.com/sighupio/fury-eks-installer) ๐Ÿ“ฆ installer: [**v3.2.0**](https://github.com/sighupio/fury-eks-installer/releases/tag/v3.2.0) @@ -60,53 +60,72 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i - Updated aws-load-balancer-controller to v2.10.0 - Updated node-termination-handler to v1.22.0 +## Breaking changes ๐Ÿ’” + +- **Loki store and schema change**: A new store and schema has been introduced in order to improve efficiency, speed and scalability of Loki clusters. See "[New features](#new-features-)" below for more details. +- **DR schema change**: A new format for the schedule customization has been introduced to improve the usability.
See "[New Features](#new-features-)" section below for more details. +- **Kyverno validation failure action**: Kyverno has deprecated `audit` and `enforce` as valid options for the `validationFailureAction`, valid options are now `Audit` and `Enforce`, in title case. Adjust your `.spec.distribution.modules.policy.kyverno.validationFailureAction` value accordingly. + ## New features ๐ŸŒŸ -- **New option for Logging**: The Loki configuration has been extended to accommodate the new `tsdbStartDate` option to allow a migration towards TSDB and schema v13 (note: **this is a breaking change**): +- **New option for Logging**: Loki's configuration has been extended to accommodate a new `tsdbStartDate` **required** option to allow a migration towards TSDB and schema v13 storage (note: **this is a breaking change**): ```yaml ... - loki: - tsdbStartDate: "2024-11-18" + spec: + distribution: + modules: + logging: + loki: + tsdbStartDate: "2024-11-18" ... ``` - - `tsdbStartDate` (**required**): configures details for the schema config for the purpose of the migration + - `tsdbStartDate` (**required**): a string in `ISO 8601` date format that represents the day starting from which Loki will record logs with the new store and schema. - `tsdbStartDate` should be a string in `ISO 8601` date format and it represents the day starting from which Loki will record logs with the new store and schema. + โ„น๏ธ **Note**: Loki will assume the start of the day on the UTC midnight of the specified day. - โ„น๏ธ **Note**: Loki will assume the start of the day on the UTC midnight of the specified day. +- **Improved configurable schedules for DR backups**: the schedule configuration has been updated to enhance the usability of schedule customization (note: **this is a breaking change**): -- **DR improved configurable schedules**: The schedule configuration has been updated to enhance the usability of schedule customization (note: **this is a breaking change**): ```yaml ... 
- dr: - velero: - schedules: - install: true - definitions: - manifests: - schedule: "*/15 * * * *" - ttl: "720h0m0s" - full: - schedule: "0 1 * * *" - ttl: "720h0m0s" - snapshotMoveData: false + spec: + distribution: + modules: + dr: + velero: + schedules: + install: true + definitions: + manifests: + schedule: "*/15 * * * *" + ttl: "720h0m0s" + full: + schedule: "0 1 * * *" + ttl: "720h0m0s" + snapshotMoveData: false ... ``` -- **DR snapshotMoveData options for full schedule**: A new parameter has been introduced in the velero `full` schedule to enable the snapshotMoveData feature. This feature allows data captured from a snapshot to be copied to the object storage location. Important: Enabling this parameter will cause Velero to upload all data from the snapshotted volumes to S3 using Kopia. While backups are deduplicated, significant storage usage is still expected. To enable this parameter in the full schedule: + +- **DR snapshotMoveData options for full schedule**: a new parameter has been introduced in the velero `full` schedule to enable the `snapshotMoveData` feature. This feature allows data captured from a snapshot to be copied to the object storage location. **Important**: Setting this parameter to `true` will cause Velero to upload all data from the snapshotted volumes to S3 using Kopia. While backups are deduplicated, significant storage usage is still expected. To enable this use the following parameter in the full schedule configuration: + ```yaml ... - dr: - velero: - schedules: - install: true - definitions: - full: - snapshotMoveData: false + spec: + distribution: + modules: + dr: + velero: + schedules: + install: true + definitions: + full: + snapshotMoveData: true ... 
``` + General example to enable Volume Snapshotting on rook-ceph (from our storage add-on module): + ```yaml apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass @@ -121,28 +140,52 @@ General example to enable Volume Snapshotting on rook-ceph (from our storage add csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph deletionPolicy: Retain ``` -`deletionPolicy: Retain` is important because if the volume snapshot is deleted from the namespace, the cluster wide volumesnapshotcontent CR will be preserved, maintaining the snapshot on the storage that the cluster is using. + +`deletionPolicy: Retain` is important because if the volume snapshot is deleted from the namespace, the cluster wide `volumesnapshotcontent` CR will be preserved, maintaining the snapshot on the storage that the cluster is using. + **NOTE**: For EKSCluster provider, a default VolumeSnapshotClass is created automatically. - **DR optional snapshot-controller installation**: To leverage VolumeSnapshots on the OnPremises and KFDDistribution providers, a new option on velero has been added to install the snapshot-controller component. Before activating this parameter make sure that in your cluster there is not another snapshot-controller component deployed. By default this parameter is `false`. + ```yaml ... - dr: - velero: - snapshotController: - install: true + spec: + distribution: + modules: + dr: + velero: + snapshotController: + install: true ... ``` -## Fixes ๐Ÿž +- **Prometheus ScrapeConfigs**: the Monitoring module now enables by default the `scrapeConfig` CRDs from the Prometheus Operator. All the scrapeConfig objects present in the cluster will now be detected by the operator. `ScrapeConfig`s objects are used to instruct Prometheus to scrape specific endpoints that could be outside the cluster. -- **TBD**: TBD. +- **Components Hardenning**: we hardened the security context of several components, improving the out-of-the-box security of the distribution. 
-## Breaking changes ๐Ÿ’” +- **On-premises minimal clusters**: it is now possible to create clusters with only control-plane nodes, for minimal clusters installations that need to handle minimal workloads. + +- **Helm Plugins**: Helm plugins now allow disabling validation at installation time with the `disableValidationOnInstall` option. This can be useful when installing Helm charts that fail the diff step on a first installation, for example. + +- **Network Policies** (experimental ๐Ÿงช): a new experimental feature is introduced in this version. You can now enable the installation of network policies that will restrict the traffic across all the infrastructural namespaces of KFD to just the access needed for its proper functioning and denying the rest of it. Improving the overall security of the cluster. This experimental feature is only available in OnPremises cluster at the moment. Read more in the [Pull Request](https://github.com/sighupio/fury-distribution/pull/302) introducing the feature and in the [relative documentation](https://github.com/sighupio/fury-distribution/tree/main/docs/network-policies). + +## Fixes ๐Ÿž -- **Loki store and schema change:** A new store and schema has been introduced in order to improve efficiency, speed and scalability of Loki clusters. -- **DR Schema change**: A new format for the schedule customization has been introduced to improve the usability. See New Features section for more informations. +- Improved Configuration Schema documentation: documentation for the configuration schemas was lacking, we greatly improved the quality and quantity of the documentation regarding each option in the schemas, for all the configuration kinds (OnPremises, EKSCluster, KFDDistribution). +- [[#264](https://github.com/sighupio/fury-distribution/pull/264)] Hubble UI: now is shown in the right group in the Directory +- [[#277](https://github.com/sighupio/fury-distribution/pull/277)] Hubble UI: make it work when auth type is SSO. 
+- [[#275](https://github.com/sighupio/fury-distribution/pull/275)] On-premises: use the `org` parameter for additional created users, it was being ignored before. +- [[#279](https://github.com/sighupio/fury-distribution/pull/279)] Monitoring: don't install x509 data plane on EKS clusters because it is not needed and triggers false alerts. +- [[#280](https://github.com/sighupio/fury-distribution/pull/280)] Migrations: fix migration from Auth type from `sso` to `basicAuth` and viceversa. +- [[#281](https://github.com/sighupio/fury-distribution/pull/281)] Migrations: some ingresses were not being deleted when migrating to Ingress type `none`. +- [[#281](https://github.com/sighupio/fury-distribution/pull/281)] Ingress: don't create TLS secret when ingress type is `none`. +- [[#283](https://github.com/sighupio/fury-distribution/pull/283)] EKS schema validation: fix DNS validation depending on if nginx is single, dual or none. +- [[#291](https://github.com/sighupio/fury-distribution/pull/291)] Monitoring: `minio-monitoring` ingress is now working when SSO is enabled. +- [[#291](https://github.com/sighupio/fury-distribution/pull/291)] Tracing: `minio-tracing` ingress is now created when Logging type is `none` and `auth.type` is `sso`. +- [[#293](https://github.com/sighupio/fury-distribution/pull/293)] Monitoring migrations: remove `minio-monitoring` ingress when migrating monitoring type from `mimir` to `none`. +- [[#301](https://github.com/sighupio/fury-distribution/pull/301)] Migrations: fix an error on the concatenation of kustomize bases. `external-dns` and `opensearch` are properly deleted now and no components are left behind. +- [[#310](https://github.com/sighupio/fury-distribution/pull/310)] Migrations: fix an error while migrating from auth type `none` to `sso` related to old ingresses not being deleted first. ## Upgrade procedure -Check the [upgrade docs](https://github.com/sighupio/furyctl/tree/main/docs/upgrades/kfd/README.md) for the detailed procedure. 
+Check the [upgrade docs](https://docs.kubernetesfury.com/docs/upgrades/upgrades) for the detailed procedure. From f93c8e64e805cfefc5dbb82389cd4a8d66555eb3 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 16:30:11 +0100 Subject: [PATCH 150/160] docs: add Global CVE patched images on the release note --- docs/releases/v1.30.0.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index 72d800ed2..e9948cda4 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -169,6 +169,16 @@ General example to enable Volume Snapshotting on rook-ceph (from our storage add - **Network Policies** (experimental ๐Ÿงช): a new experimental feature is introduced in this version. You can now enable the installation of network policies that will restrict the traffic across all the infrastructural namespaces of KFD to just the access needed for its proper functioning and denying the rest of it. Improving the overall security of the cluster. This experimental feature is only available in OnPremises cluster at the moment. Read more in the [Pull Request](https://github.com/sighupio/fury-distribution/pull/302) introducing the feature and in the [relative documentation](https://github.com/sighupio/fury-distribution/tree/main/docs/network-policies). +- **Global CVE patched images for core modules**: This distribution version includes images that have been patched for OS vulnerabilities (CVE). To use these patched images, select the following option: + ```yaml + ... + distribution: + common: + registry: registry.sighup.io/fury-secured + ... + ``` + + ## Fixes ๐Ÿž - Improved Configuration Schema documentation: documentation for the configuration schemas was lacking, we greatly improved the quality and quantity of the documentation regarding each option in the schemas, for all the configuration kinds (OnPremises, EKSCluster, KFDDistribution). 
From 879f6234c47c558e53a14184beab1c2122fdeaee Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 17:00:57 +0100 Subject: [PATCH 151/160] chore: move legacy releases under legacy folder --- docs/releases/{ => legacy}/v1.1.0.md | 0 docs/releases/{ => legacy}/v1.2.0.md | 0 docs/releases/{ => legacy}/v1.3.0.md | 0 docs/releases/{ => legacy}/v1.4.0.md | 0 docs/releases/{ => legacy}/v1.5.0.md | 0 docs/releases/{ => legacy}/v1.5.1.md | 0 docs/releases/{ => legacy}/v1.6.0.md | 0 docs/releases/{ => legacy}/v1.7.0.md | 0 docs/releases/{ => legacy}/v1.7.1.md | 0 9 files changed, 0 insertions(+), 0 deletions(-) rename docs/releases/{ => legacy}/v1.1.0.md (100%) rename docs/releases/{ => legacy}/v1.2.0.md (100%) rename docs/releases/{ => legacy}/v1.3.0.md (100%) rename docs/releases/{ => legacy}/v1.4.0.md (100%) rename docs/releases/{ => legacy}/v1.5.0.md (100%) rename docs/releases/{ => legacy}/v1.5.1.md (100%) rename docs/releases/{ => legacy}/v1.6.0.md (100%) rename docs/releases/{ => legacy}/v1.7.0.md (100%) rename docs/releases/{ => legacy}/v1.7.1.md (100%) diff --git a/docs/releases/v1.1.0.md b/docs/releases/legacy/v1.1.0.md similarity index 100% rename from docs/releases/v1.1.0.md rename to docs/releases/legacy/v1.1.0.md diff --git a/docs/releases/v1.2.0.md b/docs/releases/legacy/v1.2.0.md similarity index 100% rename from docs/releases/v1.2.0.md rename to docs/releases/legacy/v1.2.0.md diff --git a/docs/releases/v1.3.0.md b/docs/releases/legacy/v1.3.0.md similarity index 100% rename from docs/releases/v1.3.0.md rename to docs/releases/legacy/v1.3.0.md diff --git a/docs/releases/v1.4.0.md b/docs/releases/legacy/v1.4.0.md similarity index 100% rename from docs/releases/v1.4.0.md rename to docs/releases/legacy/v1.4.0.md diff --git a/docs/releases/v1.5.0.md b/docs/releases/legacy/v1.5.0.md similarity index 100% rename from docs/releases/v1.5.0.md rename to docs/releases/legacy/v1.5.0.md diff --git a/docs/releases/v1.5.1.md b/docs/releases/legacy/v1.5.1.md 
similarity index 100% rename from docs/releases/v1.5.1.md rename to docs/releases/legacy/v1.5.1.md diff --git a/docs/releases/v1.6.0.md b/docs/releases/legacy/v1.6.0.md similarity index 100% rename from docs/releases/v1.6.0.md rename to docs/releases/legacy/v1.6.0.md diff --git a/docs/releases/v1.7.0.md b/docs/releases/legacy/v1.7.0.md similarity index 100% rename from docs/releases/v1.7.0.md rename to docs/releases/legacy/v1.7.0.md diff --git a/docs/releases/v1.7.1.md b/docs/releases/legacy/v1.7.1.md similarity index 100% rename from docs/releases/v1.7.1.md rename to docs/releases/legacy/v1.7.1.md From b8c628392d3177629dae5d20ecff7659517fa37c Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 17:13:00 +0100 Subject: [PATCH 152/160] fix: missing opa module on release note, fixing auth version on main readme, fixed also some spelling error --- README.md | 2 +- docs/releases/v1.30.0.md | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 2e28aec31..42a69eaa6 100644 --- a/README.md +++ b/README.md @@ -181,7 +181,7 @@ KFD is open-source software and it's released under the following [LICENSE](LICE [tracing-version]: https://img.shields.io/badge/release-v1.1.0-blue [dr-version]: https://img.shields.io/badge/release-v3.0.0-blue [opa-version]: https://img.shields.io/badge/release-v1.13.0-blue -[auth-version]: https://img.shields.io/badge/release-v0.3.0-blue +[auth-version]: https://img.shields.io/badge/release-v0.4.0-blue diff --git a/docs/releases/v1.30.0.md b/docs/releases/v1.30.0.md index e9948cda4..bf96082d8 100644 --- a/docs/releases/v1.30.0.md +++ b/docs/releases/v1.30.0.md @@ -54,6 +54,10 @@ The distribution is maintained with โค๏ธ by the team [SIGHUP](https://sighup.i - [tracing](https://github.com/sighupio/fury-kubernetes-tracing) ๐Ÿ“ฆ core module: [**v1.1.0**](https://github.com/sighupio/fury-kubernetes-tracing/releases/tag/v1.1.0) - Updated tempo to v2.6.0 - Updated minio to version 
RELEASE.2024-10-13T13-34-11Z +- [opa](https://github.com/sighupio/fury-kubernetes-opa) ๐Ÿ“ฆ core module: [**v1.13.0**](https://github.com/sighupio/fury-kubernetes-opa/releases/tag/v1.13.0) + - Updated gatekeeper to v3.17.1 + - Updated gatekeeper-policy-manager to v1.0.13 + - Updated kyverno to v1.12.6 - [aws](https://github.com/sighupio/fury-kubernetes-aws) ๐Ÿ“ฆ module: [**v4.3.0**](https://github.com/sighupio/fury-kubernetes-aws/releases/tag/v4.3.0) - Updated cluster-autoscaler to v1.30.0 - Updated snapshot-controller to v8.1.0 @@ -159,9 +163,9 @@ General example to enable Volume Snapshotting on rook-ceph (from our storage add ... ``` -- **Prometheus ScrapeConfigs**: the Monitoring module now enables by default the `scrapeConfig` CRDs from the Prometheus Operator. All the scrapeConfig objects present in the cluster will now be detected by the operator. `ScrapeConfig`s objects are used to instruct Prometheus to scrape specific endpoints that could be outside the cluster. +- **Prometheus ScrapeConfigs**: the Monitoring module now enables by default the `scrapeConfig` CRDs from the Prometheus Operator. All the scrapeConfig objects present in the cluster will now be detected by the operator. `ScrapeConfig` objects are used to instruct Prometheus to scrape specific endpoints that could be outside the cluster. -- **Components Hardenning**: we hardened the security context of several components, improving the out-of-the-box security of the distribution. +- **Components Hardening**: we hardened the security context of several components, improving the out-of-the-box security of the distribution. - **On-premises minimal clusters**: it is now possible to create clusters with only control-plane nodes, for minimal clusters installations that need to handle minimal workloads. 
@@ -172,9 +176,10 @@ General example to enable Volume Snapshotting on rook-ceph (from our storage add - **Global CVE patched images for core modules**: This distribution version includes images that have been patched for OS vulnerabilities (CVE). To use these patched images, select the following option: ```yaml ... - distribution: - common: - registry: registry.sighup.io/fury-secured + spec: + distribution: + common: + registry: registry.sighup.io/fury-secured ... ``` From f04daf82fced4f4e051a05f269382ff776925fbe Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 17:36:01 +0100 Subject: [PATCH 153/160] feat: bump dr module on kfd.yaml to stable v3.0.0 --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 35a12cd4d..5b6912b02 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -6,7 +6,7 @@ version: v1.30.0 modules: auth: v0.4.0-rc.0 aws: v4.3.0 - dr: v3.0.0-rc.1 + dr: v3.0.0 ingress: v3.0.1 logging: v4.0.0-rc.3 monitoring: v3.3.0-rc.4 From eb267bd599b4f0ff4d65c90bccb19afe36650622 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 17:37:36 +0100 Subject: [PATCH 154/160] chore: removed deprecated Furyfile and kustomization.yaml file from the repository. Removed also the release step of these files. 
--- .drone.yml | 4 ---- Furyfile.yaml | 31 ------------------------------- kustomization.yaml | 44 -------------------------------------------- 3 files changed, 79 deletions(-) delete mode 100644 Furyfile.yaml delete mode 100644 kustomization.yaml diff --git a/.drone.yml b/.drone.yml index 2ceaab572..9be28af18 100644 --- a/.drone.yml +++ b/.drone.yml @@ -353,8 +353,6 @@ steps: from_secret: github_token file_exists: skip files: - - Furyfile.yaml - - kustomization.yaml - kfd.yaml prerelease: true overwrite: true @@ -380,8 +378,6 @@ steps: from_secret: github_token file_exists: skip files: - - Furyfile.yaml - - kustomization.yaml - kfd.yaml prerelease: false overwrite: true diff --git a/Furyfile.yaml b/Furyfile.yaml deleted file mode 100644 index 7d1557fc1..000000000 --- a/Furyfile.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2022 SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - ---- -versions: - auth: v0.3.0 - aws: v4.2.0 - dr: v2.3.0 - ingress: v2.3.3 - logging: v3.4.1 - monitoring: v3.2.0 - opa: v1.12.0 - networking: v1.17.0 - tracing: v1.0.3 - -bases: - - name: auth - - name: aws - - name: dr - - name: ingress - - name: logging - - name: monitoring - - name: networking - - name: opa - - name: tracing - -modules: - - name: aws - - name: dr - - name: ingress diff --git a/kustomization.yaml b/kustomization.yaml deleted file mode 100644 index efe55acd5..000000000 --- a/kustomization.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2017-present SIGHUP s.r.l All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# NB: This is a starting point for a kustomization.yaml file. It is not meant to be used in production as is. 
- -resources: - # Networking - - ./vendor/katalog/networking/calico - # OPA - - ./vendor/katalog/opa/gatekeeper/core - - ./vendor/katalog/opa/gatekeeper/rules/templates - - ./vendor/katalog/opa/gatekeeper/rules/config - - ./vendor/katalog/opa/gatekeeper/gpm - # Monitoring - - ./vendor/katalog/monitoring/prometheus-operator - - ./vendor/katalog/monitoring/prometheus-operated - - ./vendor/katalog/monitoring/grafana - - ./vendor/katalog/monitoring/kubeadm-sm - - ./vendor/katalog/monitoring/kube-proxy-metrics - - ./vendor/katalog/monitoring/kube-state-metrics - - ./vendor/katalog/monitoring/node-exporter - - ./vendor/katalog/monitoring/prometheus-adapter - - ./vendor/katalog/monitoring/alertmanager-operated - # Logging - - ./vendor/katalog/logging/opensearch-single - - ./vendor/katalog/logging/opensearch-dashboards - - ./vendor/katalog/logging/logging-operator - - ./vendor/katalog/logging/logging-operated - - ./vendor/katalog/logging/minio-ha - - ./vendor/katalog/logging/loki-distributed - - ./vendor/katalog/logging/configs - # Ingress - - ./vendor/katalog/ingress/cert-manager - - ./vendor/katalog/ingress/nginx - - ./vendor/katalog/ingress/forecastle - # DR - - ./vendor/katalog/dr/velero/velero-on-prem - - ./vendor/katalog/dr/velero/velero-schedules - - ./vendor/katalog/dr/velero/velero-node-agent From 58eb8e167ac01cf511b3acf6f58e16c8ac4f5b22 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 17:38:07 +0100 Subject: [PATCH 155/160] feat: bump auth to v0.4.0 final --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 5b6912b02..fe711eda0 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -4,7 +4,7 @@ version: v1.30.0 modules: - auth: v0.4.0-rc.0 + auth: v0.4.0 aws: v4.3.0 dr: v3.0.0 ingress: v3.0.1 From f4217910d70e32a33407a6b61be2b8843d59c190 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 17:38:26 +0100 Subject: [PATCH 156/160] feat: bump on-prem installer to v1.30.6 final --- kfd.yaml | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index fe711eda0..e72191196 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -19,7 +19,7 @@ kubernetes: installer: v3.2.0 onpremises: version: 1.30.6 - installer: v1.30.6-rc.2 + installer: v1.30.6 furyctlSchemas: eks: - apiVersion: kfd.sighup.io/v1alpha2 From a8f302057e473bb549c64f225798846d78d53207 Mon Sep 17 00:00:00 2001 From: Ramiro Algozino Date: Wed, 27 Nov 2024 17:41:03 +0100 Subject: [PATCH 157/160] feat: bump monitoring to v3.3.0 final --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index e72191196..ab11c105b 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -9,7 +9,7 @@ modules: dr: v3.0.0 ingress: v3.0.1 logging: v4.0.0-rc.3 - monitoring: v3.3.0-rc.4 + monitoring: v3.3.0 opa: v1.13.0 networking: v2.0.0-rc.2 tracing: v1.1.0 From fb4ca3555d397191307b7584356ad2f0d4d24da9 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 18:04:57 +0100 Subject: [PATCH 158/160] feat: bump logging to v4.0.0 final --- kfd.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index ab11c105b..cf25728a3 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -8,7 +8,9 @@ modules: aws: v4.3.0 dr: v3.0.0 ingress: v3.0.1 - logging: v4.0.0-rc.3 + logging: v4.0.0 + monitoring: v3.3.0 + logging: v4.0.0 monitoring: v3.3.0 opa: v1.13.0 networking: v2.0.0-rc.2 From 700c59243ae8dec9bb1444646c65ca0fefe363a5 Mon Sep 17 00:00:00 2001 From: Samuele Chiocca Date: Wed, 27 Nov 2024 18:06:39 +0100 Subject: [PATCH 159/160] fix: duplicated keys on kfd.yaml --- kfd.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/kfd.yaml b/kfd.yaml index cf25728a3..030092488 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -10,8 +10,6 @@ modules: ingress: v3.0.1 logging: v4.0.0 monitoring: v3.3.0 - logging: v4.0.0 - monitoring: v3.3.0 opa: v1.13.0 networking: v2.0.0-rc.2 tracing: v1.1.0 From cb84a4e95febdafba446d3aa779624b876066c1c Mon Sep 17 00:00:00 2001 From: 
Samuele Chiocca Date: Wed, 27 Nov 2024 18:20:20 +0100 Subject: [PATCH 160/160] feat: bump networking to v2.0.0 final --- kfd.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kfd.yaml b/kfd.yaml index 030092488..af7ef8d4b 100644 --- a/kfd.yaml +++ b/kfd.yaml @@ -11,7 +11,7 @@ modules: logging: v4.0.0 monitoring: v3.3.0 opa: v1.13.0 - networking: v2.0.0-rc.2 + networking: v2.0.0 tracing: v1.1.0 kubernetes: eks: