diff --git a/.dockerignore b/.dockerignore index 42b1005a11..7e03d2ea17 100644 --- a/.dockerignore +++ b/.dockerignore @@ -17,6 +17,9 @@ _artifacts Makefile **/Makefile +# add yaml files from internal/kubevip which are required for embedding. +!internal/kubevip/*.yaml + # ignores changes to test-only code to avoid extra rebuilds test/e2e/** diff --git a/.gitignore b/.gitignore index 1e0ca8c622..7203c391c8 100644 --- a/.gitignore +++ b/.gitignore @@ -19,12 +19,12 @@ _artifacts/ test/e2e/data/infrastructure-vsphere-govmomi/main/**/clusterclass-quick-start.yaml test/e2e/data/infrastructure-vsphere-govmomi/main/**/cluster-template*.yaml test/e2e/data/infrastructure-vsphere-govmomi/*/cluster-template*.yaml -test/e2e/data/infrastructure-vsphere-govmomi/*/clusterclass-quick-start.yaml +test/e2e/data/infrastructure-vsphere-govmomi/*/clusterclass-quick-start*.yaml test/e2e/data/infrastructure-vsphere-supervisor/main/**/clusterclass-quick-start-supervisor.yaml test/e2e/data/infrastructure-vsphere-supervisor/main/**/cluster-template*.yaml test/e2e/data/infrastructure-vsphere-supervisor/*/cluster-template*.yaml -test/e2e/data/infrastructure-vsphere-supervisor/*/clusterclass-quick-start-supervisor.yaml +test/e2e/data/infrastructure-vsphere-supervisor/*/clusterclass-quick-start*-supervisor.yaml # env vars file used in getting-started.md and manifests generation envvars.txt diff --git a/Makefile b/Makefile index 108818aeda..540cd00969 100644 --- a/Makefile +++ b/Makefile @@ -60,6 +60,7 @@ BUILD_DIR := .build TEST_DIR := test VCSIM_DIR := test/infrastructure/vcsim NETOP_DIR := test/infrastructure/net-operator +TEST_EXTENSION_DIR := test/extension TOOLS_DIR := hack/tools TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR)) FLAVOR_DIR := $(ROOT_DIR)/templates @@ -233,6 +234,10 @@ VM_OPERATOR_ALL_ARCH = amd64 arm64 NET_OPERATOR_IMAGE_NAME ?= cluster-api-net-operator NET_OPERATOR_IMG ?= $(STAGING_REGISTRY)/$(NET_OPERATOR_IMAGE_NAME) +# test-extension +TEST_EXTENSION_IMAGE_NAME ?= 
cluster-api-vsphere-test-extension +TEST_EXTENSION_IMG ?= $(STAGING_REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME) + # boskosctl BOSKOSCTL_IMG ?= gcr.io/k8s-staging-capi-vsphere/extra/boskosctl BOSKOSCTL_IMG_TAG ?= $(shell git describe --always --dirty) @@ -271,6 +276,7 @@ SUPERVISOR_WEBHOOK_ROOT ?= $(MANIFEST_ROOT)/supervisor/webhook RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac VCSIM_RBAC_ROOT ?= $(VCSIM_DIR)/config/rbac NETOP_RBAC_ROOT ?= $(NETOP_DIR)/config/rbac +TEST_EXTENSION_RBAC_ROOT ?= $(TEST_EXTENSION_DIR)/config/rbac JANITOR_DIR ?= ./$(TOOLS_DIR)/janitor @@ -318,6 +324,11 @@ generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. paths=./$(NETOP_DIR)/controllers/... \ output:rbac:dir=$(NETOP_RBAC_ROOT) \ rbac:roleName=manager-role + # test-extension is used for Runtime SDK tests + $(CONTROLLER_GEN) \ + paths=./$(TEST_EXTENSION_DIR)/... \ + output:rbac:dir=$(TEST_EXTENSION_RBAC_ROOT) \ + rbac:roleName=manager-role # vcsim crds are used for tests. $(CONTROLLER_GEN) \ paths=./$(VCSIM_DIR)/api/v1alpha1 \ @@ -377,8 +388,10 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai # generate clusterclass and cluster topology cp "$(RELEASE_DIR)/main/clusterclass-template.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass/clusterclass-quick-start.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass-runtimesdk" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass-quick-start-runtimesdk.yaml" cp "$(RELEASE_DIR)/main/cluster-template-topology.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/topology/cluster-template-topology.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/topology" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-topology.yaml" + 
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/topology-runtimesdk" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-topology-runtimesdk.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/install-on-bootstrap" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-install-on-bootstrap.yaml" # for PCI passthrough template "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/pci" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-pci.yaml" @@ -392,8 +405,10 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/base" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-supervisor.yaml" cp "$(RELEASE_DIR)/main/clusterclass-template-supervisor.yaml" "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/clusterclass/clusterclass-quick-start-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/clusterclass" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/clusterclass-quick-start-supervisor.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/clusterclass-runtimesdk" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/clusterclass-quick-start-supervisor-runtimesdk.yaml" cp "$(RELEASE_DIR)/main/cluster-template-topology-supervisor.yaml" "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/topology/cluster-template-topology-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/topology" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-topology-supervisor.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/topology-runtimesdk" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-topology-runtimesdk-supervisor.yaml" "$(KUSTOMIZE)" 
--load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/conformance" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-conformance-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/install-on-bootstrap" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-install-on-bootstrap-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/ownerrefs-finalizers" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-ownerrefs-finalizers-supervisor.yaml" @@ -576,6 +591,15 @@ docker-build-net-operator: docker-pull-prerequisites ## Build the docker image f $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./$(NETOP_DIR)/config/default/manager_pull_policy.yaml"; \ fi +.PHONY: docker-build-test-extension +docker-build-test-extension: docker-pull-prerequisites ## Build the docker image for test-extension controller manager +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat $(TEST_EXTENSION_DIR)/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file - + @if [ "${DOCKER_BUILD_MODIFY_MANIFESTS}" = "true" ]; then \ + $(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./$(TEST_EXTENSION_DIR)/config/default/manager_image_patch.yaml"; \ + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./$(TEST_EXTENSION_DIR)/config/default/manager_pull_policy.yaml"; \ + fi + .PHONY: docker-build-boskosctl docker-build-boskosctl: cat hack/tools/boskosctl/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) . 
-t $(BOSKOSCTL_IMG):$(BOSKOSCTL_IMG_TAG) --file - @@ -643,6 +667,7 @@ e2e-images: ## Build the e2e manager image $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-vcsim $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-net-operator + $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-test-extension .PHONY: e2e e2e: e2e-images generate-e2e-templates diff --git a/config/base/manager_pull_policy.yaml b/config/base/manager_pull_policy.yaml index cd7ae12c01..74a0879c60 100644 --- a/config/base/manager_pull_policy.yaml +++ b/config/base/manager_pull_policy.yaml @@ -8,4 +8,4 @@ spec: spec: containers: - name: manager - imagePullPolicy: IfNotPresent + imagePullPolicy: Always diff --git a/internal/clusterclass/variables.go b/internal/clusterclass/variables.go new file mode 100644 index 0000000000..e9a5b5aee3 --- /dev/null +++ b/internal/clusterclass/variables.go @@ -0,0 +1,101 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package clusterclass provides the shared functions for creating clusterclasses. +package clusterclass + +import ( + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// GetClusterClassVariables provides the variables for the clusterclass. +// In govmomi mode it has additional variables. 
+func GetClusterClassVariables(govmomiMode bool) []clusterv1.ClusterClassVariable { + variables := []clusterv1.ClusterClassVariable{ + { + Name: "sshKey", + Required: false, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Description: "Public key to SSH onto the cluster nodes.", + Type: "string", + }, + }, + }, + { + Name: "controlPlaneIpAddr", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Description: "Floating VIP for the control plane.", + }, + }, + }, + { + Name: "controlPlanePort", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "integer", + Description: "Port for the control plane endpoint.", + }, + }, + }, + { + Name: "kubeVipPodManifest", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Description: "kube-vip manifest for the control plane.", + }, + }, + }, + } + + if govmomiMode { + varForNoneSupervisorMode := []clusterv1.ClusterClassVariable{ + { + Name: "infraServer", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]clusterv1.JSONSchemaProps{ + "url": {Type: "string"}, + "thumbprint": {Type: "string"}, + }, + }, + }, + }, + { + Name: "credsSecretName", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Description: "Secret containing the credentials for the infra cluster.", + }, + }, + }, + } + + variables = append(variables, varForNoneSupervisorMode...) 
+ } + + return variables +} diff --git a/packaging/flavorgen/flavors/kubevip/files.go b/internal/kubevip/files.go similarity index 90% rename from packaging/flavorgen/flavors/kubevip/files.go rename to internal/kubevip/files.go index ee0ce3bb0d..64f2c06240 100644 --- a/packaging/flavorgen/flavors/kubevip/files.go +++ b/internal/kubevip/files.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package kubevip provides the files required to run kube-vip in a cluster. package kubevip import ( @@ -38,12 +39,13 @@ var ( kubeVipPodRaw string ) -func newKubeVIPFiles() []bootstrapv1.File { +// Files returns the files required for a control plane node to run kube-vip. +func Files() []bootstrapv1.File { return []bootstrapv1.File{ { Owner: "root:root", Path: "/etc/kubernetes/manifests/kube-vip.yaml", - Content: kubeVIPPodYAML(), + Content: PodYAML(), Permissions: "0644", }, // This file is part of the workaround for https://github.com/kube-vip/kube-vip/issues/692 @@ -63,7 +65,8 @@ func newKubeVIPFiles() []bootstrapv1.File { } } -func kubeVIPPodYAML() string { +// PodYAML returns the static pod manifest required to run kube-vip. 
+func PodYAML() string { pod := &corev1.Pod{} if err := yaml.Unmarshal([]byte(kubeVipPodRaw), pod); err != nil { diff --git a/packaging/flavorgen/flavors/kubevip/kube-vip-prepare.sh b/internal/kubevip/kube-vip-prepare.sh similarity index 100% rename from packaging/flavorgen/flavors/kubevip/kube-vip-prepare.sh rename to internal/kubevip/kube-vip-prepare.sh diff --git a/packaging/flavorgen/flavors/kubevip/kube-vip.yaml b/internal/kubevip/kube-vip.yaml similarity index 100% rename from packaging/flavorgen/flavors/kubevip/kube-vip.yaml rename to internal/kubevip/kube-vip.yaml diff --git a/packaging/flavorgen/flavors/clusterclass_generators.go b/packaging/flavorgen/flavors/clusterclass_generators.go index 0e5382b10c..d49cd305ac 100644 --- a/packaging/flavorgen/flavors/clusterclass_generators.go +++ b/packaging/flavorgen/flavors/clusterclass_generators.go @@ -30,6 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/internal/clusterclass" "sigs.k8s.io/cluster-api-provider-vsphere/packaging/flavorgen/flavors/env" "sigs.k8s.io/cluster-api-provider-vsphere/packaging/flavorgen/flavors/kubevip" "sigs.k8s.io/cluster-api-provider-vsphere/packaging/flavorgen/flavors/util" @@ -55,7 +56,7 @@ func newClusterClass() clusterv1.ClusterClass { }, ControlPlane: getControlPlaneClass(), Workers: getWorkersClass(), - Variables: getClusterClassVariables(false), + Variables: clusterclass.GetClusterClassVariables(true), Patches: getClusterClassPatches(), }, } @@ -81,7 +82,7 @@ func newVMWareClusterClass() clusterv1.ClusterClass { }, ControlPlane: getVMWareControlPlaneClass(), Workers: getVMWareWorkersClass(), - Variables: getClusterClassVariables(true), + Variables: clusterclass.GetClusterClassVariables(false), Patches: getVMWareClusterClassPatches(), }, } @@ -238,83 +239,6 @@ func getEnableSSHIntoNodesTemplate() *string { return 
ptr.To(string(templateStr)) } -func getClusterClassVariables(supervisorMode bool) []clusterv1.ClusterClassVariable { - variables := []clusterv1.ClusterClassVariable{ - { - Name: "sshKey", - Required: false, - Schema: clusterv1.VariableSchema{ - OpenAPIV3Schema: clusterv1.JSONSchemaProps{ - Description: "Public key to SSH onto the cluster nodes.", - Type: "string", - }, - }, - }, - { - Name: "controlPlaneIpAddr", - Required: true, - Schema: clusterv1.VariableSchema{ - OpenAPIV3Schema: clusterv1.JSONSchemaProps{ - Type: "string", - Description: "Floating VIP for the control plane.", - }, - }, - }, - { - Name: "controlPlanePort", - Required: true, - Schema: clusterv1.VariableSchema{ - OpenAPIV3Schema: clusterv1.JSONSchemaProps{ - Type: "integer", - Description: "Port for the control plane endpoint.", - }, - }, - }, - { - Name: "kubeVipPodManifest", - Required: true, - Schema: clusterv1.VariableSchema{ - OpenAPIV3Schema: clusterv1.JSONSchemaProps{ - Type: "string", - Description: "kube-vip manifest for the control plane.", - }, - }, - }, - } - - if !supervisorMode { - varForNoneSupervisorMode := []clusterv1.ClusterClassVariable{ - { - Name: "infraServer", - Required: true, - Schema: clusterv1.VariableSchema{ - OpenAPIV3Schema: clusterv1.JSONSchemaProps{ - Type: "object", - Properties: map[string]clusterv1.JSONSchemaProps{ - "url": {Type: "string"}, - "thumbprint": {Type: "string"}, - }, - }, - }, - }, - { - Name: "credsSecretName", - Required: true, - Schema: clusterv1.VariableSchema{ - OpenAPIV3Schema: clusterv1.JSONSchemaProps{ - Type: "string", - Description: "Secret containing the credentials for the infra cluster.", - }, - }, - }, - } - - variables = append(variables, varForNoneSupervisorMode...) 
- } - - return variables -} - func newVSphereClusterTemplate() infrav1.VSphereClusterTemplate { return infrav1.VSphereClusterTemplate{ TypeMeta: metav1.TypeMeta{ diff --git a/packaging/flavorgen/flavors/kubevip/kubevip.go b/packaging/flavorgen/flavors/kubevip/kubevip.go index c18ead9c79..a78d8003cf 100644 --- a/packaging/flavorgen/flavors/kubevip/kubevip.go +++ b/packaging/flavorgen/flavors/kubevip/kubevip.go @@ -19,9 +19,11 @@ package kubevip import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + + "sigs.k8s.io/cluster-api-provider-vsphere/internal/kubevip" ) // PatchControlPlane adds kube-vip to a KubeadmControlPlane object. func PatchControlPlane(cp *controlplanev1.KubeadmControlPlane) { - cp.Spec.KubeadmConfigSpec.Files = append(cp.Spec.KubeadmConfigSpec.Files, newKubeVIPFiles()...) + cp.Spec.KubeadmConfigSpec.Files = append(cp.Spec.KubeadmConfigSpec.Files, kubevip.Files()...) } diff --git a/packaging/flavorgen/flavors/kubevip/topology.go b/packaging/flavorgen/flavors/kubevip/topology.go index 5d15bbb7fb..2dd0d7b17a 100644 --- a/packaging/flavorgen/flavors/kubevip/topology.go +++ b/packaging/flavorgen/flavors/kubevip/topology.go @@ -30,12 +30,13 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/yaml" + "sigs.k8s.io/cluster-api-provider-vsphere/internal/kubevip" "sigs.k8s.io/cluster-api-provider-vsphere/packaging/flavorgen/flavors/util" ) // TopologyVariable returns the ClusterClass variable for kube-vip. 
func TopologyVariable() (*clusterv1.ClusterVariable, error) { - out, err := json.Marshal(kubeVIPPodYAML()) + out, err := json.Marshal(kubevip.PodYAML()) if err != nil { return nil, errors.Wrapf(err, "failed to json-encode variable kubeVipPod") } @@ -52,7 +53,7 @@ func TopologyVariable() (*clusterv1.ClusterVariable, error) { func TopologyPatch() clusterv1.ClusterClassPatch { patches := []clusterv1.JSONPatch{} - for _, f := range newKubeVIPFiles() { + for _, f := range kubevip.Files() { p := clusterv1.JSONPatch{ Op: "add", Path: "/spec/template/spec/kubeadmConfigSpec/files/-", diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go new file mode 100644 index 0000000000..b597eaf608 --- /dev/null +++ b/test/e2e/cluster_upgrade_runtimesdk_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "github.com/blang/semver/v4" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/utils/ptr" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" +) + +var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [vcsim] [supervisor] [ClusterClass]", func() { + const specName = "k8s-upgrade-with-runtimesdk" // aligned to CAPI + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { + capi_e2e.ClusterUpgradeWithRuntimeSDKSpec(ctx, func() capi_e2e.ClusterUpgradeWithRuntimeSDKSpecInput { + version, err := semver.ParseTolerant(e2eConfig.GetVariable(capi_e2e.KubernetesVersionUpgradeFrom)) + Expect(err).ToNot(HaveOccurred(), "Invalid argument, KUBERNETES_VERSION_UPGRADE_FROM is not a valid version") + if version.LT(semver.MustParse("1.24.0")) { + Fail("This test only supports upgrades from Kubernetes >= v1.24.0") + } + + return capi_e2e.ClusterUpgradeWithRuntimeSDKSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + PostUpgrade: func(proxy framework.ClusterProxy, namespace, clusterName string) { + // This check ensures that the resourceVersions are stable, i.e. it verifies there are no + // continuous reconciles when everything should be stable. + framework.ValidateResourceVersionStable(ctx, proxy, namespace, FilterObjectsWithKindAndName(clusterName)) + }, + // "topology-runtimesdk" is the same as the "topology" flavor but with an additional RuntimeExtension. 
+ Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("topology-runtimesdk")), + ExtensionServiceNamespace: "capv-test-extension", + ExtensionServiceName: "capv-test-extension-webhook-service", + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, + } + }) + }) +}) diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index a7f8cb30a1..6f5a982588 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -16,6 +16,8 @@ images: loadBehavior: mustLoad - name: gcr.io/k8s-staging-capi-vsphere/cluster-api-net-operator-{ARCH}:dev loadBehavior: mustLoad + - name: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-test-extension-{ARCH}:dev + loadBehavior: mustLoad - name: gcr.io/k8s-staging-capi-vsphere/extra/vm-operator:v1.8.6-0-gde75746a loadBehavior: tryLoad @@ -149,11 +151,15 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-pci.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-storage-policy.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-topology.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-topology-runtimesdk.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-quick-start.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-quick-start-runtimesdk.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-topology-supervisor.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-topology-runtimesdk-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-supervisor.yaml" - 
sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-quick-start-supervisor.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-quick-start-supervisor-runtimesdk.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-install-on-bootstrap-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-conformance-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-ownerrefs-finalizers-supervisor.yaml" @@ -219,6 +225,19 @@ providers: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: capv-test-extension + type: RuntimeExtensionProvider + versions: + - name: v1.11.99 + # Use manifest from source files + value: ../../../../cluster-api-provider-vsphere/test/extension/config/default + contract: v1beta1 + files: + - sourcePath: "../data/shared/capv/main/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + variables: # Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml KUBERNETES_VERSION: "v1.30.0" @@ -256,11 +275,13 @@ variables: KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml" NODE_DRAIN_TIMEOUT: "60s" CLUSTER_TOPOLOGY: "true" + EXP_RUNTIME_SDK: "true" # These IDs correspond to Tesla T4s, they are the decimal representation of the hex values. DEVICE_ID: 7864 VENDOR_ID: 4318 # CAPV feature flags EXP_NODE_ANTI_AFFINITY: "true" + EXP_MACHINE_SET_PREFLIGHT_CHECKS: "true" CAPI_DIAGNOSTICS_ADDRESS: ":8080" CAPI_INSECURE_DIAGNOSTICS: "true" # Required to be set to install capv-supervisor <= v1.10. 
diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/kustomization.yaml new file mode 100644 index 0000000000..dfaf3aa974 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/kustomization.yaml @@ -0,0 +1,11 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../clusterclass +patches: + - target: + kind: ClusterClass + path: ./patch-test-extension.yaml + - target: + kind: ClusterClass + path: ./name.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/name.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/name.yaml new file mode 100644 index 0000000000..c65d3025c0 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/name.yaml @@ -0,0 +1,3 @@ +- op: replace + path: /metadata/name + value: ${CLUSTER_CLASS_NAME}-runtimesdk \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/patch-test-extension.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/patch-test-extension.yaml new file mode 100644 index 0000000000..6ac7dfab75 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass-runtimesdk/patch-test-extension.yaml @@ -0,0 +1,12 @@ +- op: replace + path: /spec/patches + value: + - name: test-patch + external: + generateExtension: generate-patches.k8s-upgrade-with-runtimesdk + validateExtension: validate-topology.k8s-upgrade-with-runtimesdk + discoverVariablesExtension: discover-variables.k8s-upgrade-with-runtimesdk + settings: + testMode: govmomi +- op: remove + path: /spec/variables diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/topology-runtimesdk/clusterclass.yaml 
b/test/e2e/data/infrastructure-vsphere-govmomi/main/topology-runtimesdk/clusterclass.yaml new file mode 100644 index 0000000000..3da8f52ef0 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/topology-runtimesdk/clusterclass.yaml @@ -0,0 +1,3 @@ +- op: replace + path: /spec/topology/class + value: ${CLUSTER_CLASS_NAME}-runtimesdk \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/topology-runtimesdk/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/topology-runtimesdk/kustomization.yaml new file mode 100644 index 0000000000..7e1977951b --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/topology-runtimesdk/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../topology +patches: + - target: + kind: Cluster + path: ./clusterclass.yaml diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/kustomization.yaml new file mode 100644 index 0000000000..dfaf3aa974 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/kustomization.yaml @@ -0,0 +1,11 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../clusterclass +patches: + - target: + kind: ClusterClass + path: ./patch-test-extension.yaml + - target: + kind: ClusterClass + path: ./name.yaml diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/name.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/name.yaml new file mode 100644 index 0000000000..6e69902e0b --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/name.yaml @@ -0,0 +1,3 @@ +- op: replace + path: /metadata/name + value: ${CLUSTER_CLASS_NAME}-runtimesdk diff --git 
a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/patch-test-extension.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/patch-test-extension.yaml new file mode 100644 index 0000000000..9f879e5b58 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass-runtimesdk/patch-test-extension.yaml @@ -0,0 +1,12 @@ +- op: replace + path: /spec/patches + value: + - name: test-patch + external: + generateExtension: generate-patches.k8s-upgrade-with-runtimesdk + validateExtension: validate-topology.k8s-upgrade-with-runtimesdk + discoverVariablesExtension: discover-variables.k8s-upgrade-with-runtimesdk + settings: + testMode: supervisor +- op: remove + path: /spec/variables diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/topology-runtimesdk/clusterclass.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/topology-runtimesdk/clusterclass.yaml new file mode 100644 index 0000000000..3da8f52ef0 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/topology-runtimesdk/clusterclass.yaml @@ -0,0 +1,3 @@ +- op: replace + path: /spec/topology/class + value: ${CLUSTER_CLASS_NAME}-runtimesdk \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/topology-runtimesdk/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/topology-runtimesdk/kustomization.yaml new file mode 100644 index 0000000000..7e1977951b --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/topology-runtimesdk/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../topology +patches: + - target: + kind: Cluster + path: ./clusterclass.yaml diff --git a/test/extension/Dockerfile b/test/extension/Dockerfile new file mode 100644 index 0000000000..bc388f2e43 --- /dev/null +++ b/test/extension/Dockerfile @@ -0,0 +1,83 @@ +# syntax=docker/dockerfile:1.4 
+ +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the manager binary +# Run this with docker build --build-arg builder_image= +ARG builder_image + +# Build architecture +ARG ARCH + +# Ignore Hadolint rule "Always tag the version of an image explicitly." +# It's an invalid finding since the image is explicitly set in the Makefile. +# https://github.com/hadolint/hadolint/wiki/DL3006 +# hadolint ignore=DL3006 +FROM ${builder_image} as builder +WORKDIR /workspace + +# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy +ARG goproxy=https://proxy.golang.org +ENV GOPROXY=$goproxy + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +# Change directories into the test go module +WORKDIR /workspace/test + +# Copy the Go Modules manifests +COPY test/go.mod go.mod +COPY test/go.sum go.sum + +# Cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN --mount=type=cache,target=/go/pkg/mod \ + go mod download + +# This needs to build with the entire CAPV context +WORKDIR /workspace + +# Copy the sources (which includes the test/infrastructure/vcsim subdirectory) +COPY ./ ./ + +# Change directories into test/extension +WORKDIR /workspace/test/extension + +# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker 
build calls +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + go build . + +# Build +ARG package=. +ARG ARCH +ARG ldflags + +# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \ + go build -trimpath -ldflags "${ldflags} -extldflags '-static'" \ + -o manager ${package} + + +FROM gcr.io/distroless/static:nonroot-${ARCH} +WORKDIR / +COPY --from=builder /workspace/test/extension/manager . +# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying pod security policies +USER 65532 +ENTRYPOINT ["/manager"] diff --git a/test/extension/README.md b/test/extension/README.md new file mode 100644 index 0000000000..15e7bece51 --- /dev/null +++ b/test/extension/README.md @@ -0,0 +1,3 @@ +# CAPV test-extension + +Provide a minimal implementation of a Runtime SDK test-extension. diff --git a/test/extension/config/certmanager/certificate.yaml b/test/extension/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..4079986e89 --- /dev/null +++ b/test/extension/config/certmanager/certificate.yaml @@ -0,0 +1,24 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. 
+# More documentation can be found at https://docs.cert-manager.io +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/test/extension/config/certmanager/kustomization.yaml b/test/extension/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..95f333f3f7 --- /dev/null +++ b/test/extension/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - certificate.yaml + +configurations: + - kustomizeconfig.yaml diff --git a/test/extension/config/certmanager/kustomizeconfig.yaml b/test/extension/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..c6a6c0f1e0 --- /dev/null +++ b/test/extension/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: + - kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: + - kind: Certificate + group: cert-manager.io + path: spec/commonName + - kind: Certificate + group: cert-manager.io + path: spec/dnsNames + - kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git a/test/extension/config/default/kustomization.yaml b/test/extension/config/default/kustomization.yaml new file mode 100644 index 0000000000..6afdad8832 --- /dev/null +++ 
b/test/extension/config/default/kustomization.yaml @@ -0,0 +1,52 @@ +namespace: capv-test-extension + +namePrefix: capv-test-extension- + +commonLabels: + cluster.x-k8s.io/provider: "runtime-extension-capv-test" + +resources: + - namespace.yaml + +bases: + - ../rbac + - ../manager + - ../webhook + - ../certmanager + +patchesStrategicMerge: + # Provide customizable hook for make targets. + - manager_image_patch.yaml + - manager_pull_policy.yaml + - manager_webhook_patch.yaml + +vars: + - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace + - name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + - name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace + - name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service + +configurations: + - kustomizeconfig.yaml diff --git a/test/extension/config/default/kustomizeconfig.yaml b/test/extension/config/default/kustomizeconfig.yaml new file mode 100644 index 0000000000..eb191e64d0 --- /dev/null +++ b/test/extension/config/default/kustomizeconfig.yaml @@ -0,0 +1,4 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +varReference: +- kind: Deployment + path: spec/template/spec/volumes/secret/secretName diff --git a/test/extension/config/default/manager_image_patch.yaml b/test/extension/config/default/manager_image_patch.yaml new file mode 100644 index 0000000000..ba67a7d45a --- /dev/null +++ b/test/extension/config/default/manager_image_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: 
system +spec: + template: + spec: + containers: + - image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-test-extension:dev + name: manager diff --git a/test/extension/config/default/manager_pull_policy.yaml b/test/extension/config/default/manager_pull_policy.yaml new file mode 100644 index 0000000000..74a0879c60 --- /dev/null +++ b/test/extension/config/default/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/test/extension/config/default/manager_webhook_patch.yaml b/test/extension/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000000..f18fd10f99 --- /dev/null +++ b/test/extension/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + diff --git a/test/extension/config/default/namespace.yaml b/test/extension/config/default/namespace.yaml new file mode 100644 index 0000000000..8b55c3cd89 --- /dev/null +++ b/test/extension/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system diff --git a/test/extension/config/manager/kustomization.yaml b/test/extension/config/manager/kustomization.yaml new file mode 100644 index 0000000000..5c5f0b84cb --- /dev/null +++ b/test/extension/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git 
a/test/extension/config/manager/manager.yaml b/test/extension/config/manager/manager.yaml new file mode 100644 index 0000000000..71697ed8e3 --- /dev/null +++ b/test/extension/config/manager/manager.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - "--leader-elect" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + image: controller:latest + name: manager + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsUser: 65532 + runAsGroup: 65532 + terminationMessagePolicy: FallbackToLogsOnError + terminationGracePeriodSeconds: 10 + serviceAccountName: manager + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/test/extension/config/rbac/kustomization.yaml b/test/extension/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..e82521ffdc --- /dev/null +++ b/test/extension/config/rbac/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- role.yaml +- role_binding.yaml +- service_account.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml diff --git a/test/extension/config/rbac/leader_election_role.yaml 
b/test/extension/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000000..23055e187d --- /dev/null +++ b/test/extension/config/rbac/leader_election_role.yaml @@ -0,0 +1,24 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete diff --git a/test/extension/config/rbac/leader_election_role_binding.yaml b/test/extension/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000000..d5e0044679 --- /dev/null +++ b/test/extension/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/extension/config/rbac/role.yaml b/test/extension/config/rbac/role.yaml new file mode 100644 index 0000000000..8e90952e79 --- /dev/null +++ b/test/extension/config/rbac/role.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/test/extension/config/rbac/role_binding.yaml b/test/extension/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..5a95f66d6f --- /dev/null +++ b/test/extension/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 
+kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/extension/config/rbac/service_account.yaml b/test/extension/config/rbac/service_account.yaml new file mode 100644 index 0000000000..77f747b53c --- /dev/null +++ b/test/extension/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: manager + namespace: system diff --git a/test/extension/config/tilt/extensionconfig.yaml b/test/extension/config/tilt/extensionconfig.yaml new file mode 100644 index 0000000000..795af86cff --- /dev/null +++ b/test/extension/config/tilt/extensionconfig.yaml @@ -0,0 +1,18 @@ +apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +kind: ExtensionConfig +metadata: + annotations: + runtime.cluster.x-k8s.io/inject-ca-from-secret: capv-test-extension/capv-test-extension-webhook-service-cert + name: capv-test-extension +spec: + clientConfig: + service: + name: capv-test-extension-webhook-service + namespace: capv-test-extension # Note: this assumes the test extension gets deployed in the default namespace defined in its own runtime-extensions-components.yaml + port: 443 + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - default # Note: this assumes the test extension is used by Cluster in the default namespace only \ No newline at end of file diff --git a/test/extension/config/webhook/kustomization.yaml b/test/extension/config/webhook/kustomization.yaml new file mode 100644 index 0000000000..66157d5d5f --- /dev/null +++ b/test/extension/config/webhook/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/test/extension/config/webhook/kustomizeconfig.yaml b/test/extension/config/webhook/kustomizeconfig.yaml new file mode 100644 index 
0000000000..345ca49278 --- /dev/null +++ b/test/extension/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,5 @@ +# the following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. + +varReference: +- path: metadata/annotations diff --git a/test/extension/config/webhook/service.yaml b/test/extension/config/webhook/service.yaml new file mode 100644 index 0000000000..711977f54f --- /dev/null +++ b/test/extension/config/webhook/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: webhook-server diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go new file mode 100644 index 0000000000..6f19769fb7 --- /dev/null +++ b/test/extension/handlers/topologymutation/handler.go @@ -0,0 +1,358 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package topologymutation contains the handlers for the topologymutation webhook. +// +// The implementation of the handlers is specifically designed for Cluster API E2E tests use cases. +// When implementing custom RuntimeExtension, it is only required to expose HandlerFunc with the +// signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. 
+package topologymutation + +import ( + "context" + "fmt" + "regexp" + + "github.com/pkg/errors" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/utils/ptr" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" + ctrl "sigs.k8s.io/controller-runtime" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/internal/clusterclass" + "sigs.k8s.io/cluster-api-provider-vsphere/internal/kubevip" +) + +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;patch;update;create + +// ExtensionHandlers provides a common struct shared across the topology mutation hooks handlers; +// this is convenient because in Cluster API's E2E tests all of them are using a decoder for working with typed +// API objects, which makes code easier to read and less error prone than using unstructured or working with raw json/yaml. +// NOTE: it is not mandatory to use a ExtensionHandlers in custom RuntimeExtension, what is important +// is to expose HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. +type ExtensionHandlers struct { + decoder runtime.Decoder +} + +// NewExtensionHandlers returns a new ExtensionHandlers for the topology mutation hook handlers. 
+func NewExtensionHandlers(scheme *runtime.Scheme) *ExtensionHandlers { + return &ExtensionHandlers{ + // Add the apiGroups being handled to the decoder + decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( + infrav1.GroupVersion, + vmwarev1.GroupVersion, + controlplanev1.GroupVersion, + bootstrapv1.GroupVersion, + ), + } +} + +// GeneratePatches implements the HandlerFunc for the GeneratePatches hook. +// The hook adds to the response the patches we are using in Cluster API E2E tests. +// NOTE: custom RuntimeExtension must implement the body of this func according to the specific use case. +func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehooksv1.GeneratePatchesRequest, resp *runtimehooksv1.GeneratePatchesResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("GeneratePatches is called") + + // By using WalkTemplates it is possible to implement patches using typed API objects, which makes code + // easier to read and less error prone than using unstructured or working with raw json/yaml. + // IMPORTANT: by unit testing this func/nested func properly, it is possible to prevent unexpected rollouts when patches are modified. 
+ topologymutation.WalkTemplates(ctx, h.decoder, req, resp, + func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error { + log := ctrl.LoggerFrom(ctx) + + isControlPlane := holderRef.Kind == "KubeadmControlPlane" + + switch obj := obj.(type) { + case *controlplanev1.KubeadmControlPlaneTemplate: + if err := patchKubeadmControlPlaneTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching KubeadmControlPlaneTemplate") + return errors.Wrap(err, "error patching KubeadmControlPlaneTemplate") + } + case *bootstrapv1.KubeadmConfigTemplate: + if err := patchKubeadmConfigTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching KubeadmConfigTemplate") + return errors.Wrap(err, "error patching KubeadmConfigTemplate") + } + case *infrav1.VSphereClusterTemplate: + if err := patchGovmomiClusterTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereClusterTemplate") + return errors.Wrap(err, "error patching VSphereClusterTemplate") + } + case *infrav1.VSphereMachineTemplate: + if err := patchGovmomiMachineTemplate(ctx, obj, variables, isControlPlane); err != nil { + log.Error(err, "Error patching VSphereMachineTemplate") + return errors.Wrap(err, "error patching VSphereMachineTemplate") + } + case *vmwarev1.VSphereClusterTemplate: + if err := patchSupervisorClusterTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereClusterTemplate") + return errors.Wrap(err, "error patching VSphereClusterTemplate") + } + case *vmwarev1.VSphereMachineTemplate: + if err := patchSupervisorMachineTemplate(ctx, obj, variables, isControlPlane); err != nil { + log.Error(err, "Error patching VSphereMachineTemplate") + return errors.Wrap(err, "error patching VSphereMachineTemplate") + } + } + return nil + }, + // Use a merge-patch instead of a JSON patch because WalkTemplates would create + // an incompatible patch for 
vmwarev1.VSphereClusterTemplate because we provide + // an empty template without a set `.spec` and due to omitempty + // `.spec.template.spec.controlPlaneEndpoint` does not exist. + topologymutation.PatchFormat{Format: runtimehooksv1.JSONMergePatchType}, + ) +} + +// patchKubeadmControlPlaneTemplate patches the KubeadmControlPlaneTemplate. +func patchKubeadmControlPlaneTemplate(_ context.Context, tpl *controlplanev1.KubeadmControlPlaneTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch enableSSHIntoNodes + if err := patchUsers(&tpl.Spec.Template.Spec.KubeadmConfigSpec, templateVariables); err != nil { + return err + } + + // patch kubeVipPodManifest + kubeVipPodManifest, err := topologymutation.GetStringVariable(templateVariables, "kubeVipPodManifest") + kubeVipPodManifestNotFound := topologymutation.IsNotFoundError(err) + if err != nil && !kubeVipPodManifestNotFound { + return err + } + // Skip patch if kubeVipPodManifest variable was not found / not set. + if !kubeVipPodManifestNotFound { + controlPlaneIPAddr, err := topologymutation.GetStringVariable(templateVariables, "controlPlaneIpAddr") + if err != nil { + return err + } + kubeVipPodManifestModified := regexp.MustCompile("(name: address\n +value:).*").ReplaceAllString(kubeVipPodManifest, fmt.Sprintf("$1 %s", controlPlaneIPAddr)) + + for _, file := range kubevip.Files() { + if file.Path == "/etc/kubernetes/manifests/kube-vip.yaml" { + file.Content = kubeVipPodManifestModified + } + tpl.Spec.Template.Spec.KubeadmConfigSpec.Files = append(tpl.Spec.Template.Spec.KubeadmConfigSpec.Files, file) + } + } + + // patch preKubeadmScript + preKubeadmScript, err := topologymutation.GetStringVariable(templateVariables, "preKubeadmScript") + preKubeadmScriptNotFound := topologymutation.IsNotFoundError(err) + if err != nil && !preKubeadmScriptNotFound { + return err + } + // Skip patch if preKubeadmScript variable was not found / not set. 
+ if !preKubeadmScriptNotFound { + version, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version") + if err != nil { + return err + } + + versionRegex := regexp.MustCompile("(KUBERNETES_VERSION=.*)") + tpl.Spec.Template.Spec.KubeadmConfigSpec.Files = append(tpl.Spec.Template.Spec.KubeadmConfigSpec.Files, + bootstrapv1.File{ + Owner: "root:root", + Path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh", + Permissions: "0755", + Content: versionRegex.ReplaceAllString(preKubeadmScript, fmt.Sprintf("KUBERNETES_VERSION=%s", version)), + }, + ) + } + + return nil +} + +// patchKubeadmConfigTemplate patches the KubeadmConfigTemplate. +func patchKubeadmConfigTemplate(_ context.Context, tpl *bootstrapv1.KubeadmConfigTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch enableSSHIntoNodes + if err := patchUsers(&tpl.Spec.Template.Spec, templateVariables); err != nil { + return err + } + + // patch preKubeadmScript + preKubeadmScript, err := topologymutation.GetStringVariable(templateVariables, "preKubeadmScript") + preKubeadmScriptNotFound := topologymutation.IsNotFoundError(err) + if err != nil && !preKubeadmScriptNotFound { + return err + } + if !preKubeadmScriptNotFound { + version, err := topologymutation.GetStringVariable(templateVariables, "builtin.machineDeployment.version") + if err != nil { + return err + } + + versionRegex := regexp.MustCompile("(KUBERNETES_VERSION=.*)") + tpl.Spec.Template.Spec.Files = append(tpl.Spec.Template.Spec.Files, + bootstrapv1.File{ + Owner: "root:root", + Path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh", + Permissions: "0755", + Content: versionRegex.ReplaceAllString(preKubeadmScript, fmt.Sprintf("KUBERNETES_VERSION=%s", version)), + }, + ) + } + + return nil +} + +func patchUsers(kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec, templateVariables map[string]apiextensionsv1.JSON) error { + sshKey, err := topologymutation.GetStringVariable(templateVariables, "sshKey") + 
if err != nil { + // Skip patch if sshKey variable is not set + if topologymutation.IsNotFoundError(err) { + return nil + } + return err + } + + kubeadmConfigSpec.Users = append(kubeadmConfigSpec.Users, + bootstrapv1.User{ + Name: "capv", + SSHAuthorizedKeys: []string{sshKey}, + Sudo: ptr.To("ALL=(ALL) NOPASSWD:ALL"), + }) + return nil +} + +// patchGovmomiClusterTemplate patches the govmomi VSphereClusterTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchGovmomiClusterTemplate(_ context.Context, vsphereCluster *infrav1.VSphereClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch infraClusterSubstitutions + controlPlaneIPAddr, err := topologymutation.GetStringVariable(templateVariables, "controlPlaneIpAddr") + if err != nil { + return err + } + var controlPlanePort int32 + if err := topologymutation.GetObjectVariableInto(templateVariables, "controlPlanePort", &controlPlanePort); err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Host = controlPlaneIPAddr + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Port = controlPlanePort + + credsSecretName, err := topologymutation.GetStringVariable(templateVariables, "credsSecretName") + if err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.IdentityRef = &infrav1.VSphereIdentityReference{ + Kind: infrav1.SecretKind, + Name: credsSecretName, + } + + infraServerURL, err := topologymutation.GetStringVariable(templateVariables, "infraServer.url") + if err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.Server = infraServerURL + + infraServerThumbprint, err := topologymutation.GetStringVariable(templateVariables, "infraServer.thumbprint") + if err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.Thumbprint = infraServerThumbprint + + return nil +} + +// patchSupervisorClusterTemplate patches the supervisor 
VSphereClusterTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchSupervisorClusterTemplate(_ context.Context, vsphereCluster *vmwarev1.VSphereClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch infraClusterSubstitutions + controlPlaneIPAddr, err := topologymutation.GetStringVariable(templateVariables, "controlPlaneIpAddr") + if err != nil { + return err + } + var controlPlanePort int32 + if err := topologymutation.GetObjectVariableInto(templateVariables, "controlPlanePort", &controlPlanePort); err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Host = controlPlaneIPAddr + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Port = controlPlanePort + + return nil +} + +// patchGovmomiMachineTemplate patches the govmomi VSphereMachineTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchGovmomiMachineTemplate(_ context.Context, vsphereMachineTemplate *infrav1.VSphereMachineTemplate, templateVariables map[string]apiextensionsv1.JSON, isControlPlane bool) error { + // patch vSphereTemplate + + var err error + vsphereMachineTemplate.Spec.Template.Spec.Template, err = calculateImageName(templateVariables, isControlPlane) + + return err +} + +// patchSupervisorMachineTemplate patches the supervisor VSphereMachineTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. 
+func patchSupervisorMachineTemplate(_ context.Context, vsphereMachineTemplate *vmwarev1.VSphereMachineTemplate, templateVariables map[string]apiextensionsv1.JSON, isControlPlane bool) error { + // patch vSphereTemplate + + var err error + vsphereMachineTemplate.Spec.Template.Spec.ImageName, err = calculateImageName(templateVariables, isControlPlane) + + return err +} + +func calculateImageName(templateVariables map[string]apiextensionsv1.JSON, isControlPlane bool) (string, error) { + // patch vSphereTemplate + versionVariable := "builtin.controlPlane.version" + if !isControlPlane { + versionVariable = "builtin.machineDeployment.version" + } + + version, err := topologymutation.GetStringVariable(templateVariables, versionVariable) + if err != nil { + return "", err + } + + // Fallback to the v1.30.0 image, except for v1.28.0 and v1.29.0. + if version != "v1.28.0" && version != "v1.29.0" { + version = "v1.30.0" + } + + return fmt.Sprintf("ubuntu-2204-kube-%s", version), nil +} + +// ValidateTopology implements the HandlerFunc for the ValidateTopology hook. +// Cluster API E2E currently are just validating the hook gets called. +// NOTE: custom RuntimeExtension must implement the body of this func according to the specific use case. +func (h *ExtensionHandlers) ValidateTopology(ctx context.Context, _ *runtimehooksv1.ValidateTopologyRequest, resp *runtimehooksv1.ValidateTopologyResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("ValidateTopology called") + + resp.Status = runtimehooksv1.ResponseStatusSuccess +} + +// DiscoverVariables implements the HandlerFunc for the DiscoverVariables hook. 
+func (h *ExtensionHandlers) DiscoverVariables(ctx context.Context, req *runtimehooksv1.DiscoverVariablesRequest, resp *runtimehooksv1.DiscoverVariablesResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("DiscoverVariables called") + + resp.Status = runtimehooksv1.ResponseStatusSuccess + + resp.Variables = clusterclass.GetClusterClassVariables(req.Settings["testMode"] == "govmomi") +} diff --git a/test/extension/main.go b/test/extension/main.go new file mode 100644 index 0000000000..172bd86471 --- /dev/null +++ b/test/extension/main.go @@ -0,0 +1,399 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// main is the main package for the test extension. +// The test extension serves two goals: +// - to provide a reference implementation of Runtime Extension +// - to implement the Runtime Extension used by Cluster API E2E tests. 
+package main + +import ( + "context" + "flag" + "os" + goruntime "runtime" + "time" + + "github.com/spf13/pflag" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/leaderelection/resourcelock" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + logsv1 "k8s.io/component-base/logs/api/v1" + _ "k8s.io/component-base/logs/json/register" + "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/server" + "sigs.k8s.io/cluster-api/test/extension/handlers/lifecycle" + "sigs.k8s.io/cluster-api/util/flags" + "sigs.k8s.io/cluster-api/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/extension/handlers/topologymutation" +) + +var ( + // catalog contains all information about RuntimeHooks. + catalog = runtimecatalog.New() + + // scheme is a Kubernetes runtime scheme containing all the information about API types used by the test extension. + // NOTE: it is not mandatory to use scheme in custom RuntimeExtension, but working with typed API objects makes code + // easier to read and less error-prone than using unstructured or working with raw json/yaml. 
+ scheme = runtime.NewScheme() + // Creates a logger to be used during the main func using controller runtime utilities + // NOTE: it is not mandatory to use controller runtime utilities in custom RuntimeExtension, but it is recommended + // because it makes log from those components similar to log from controllers. + setupLog = ctrl.Log.WithName("setup") + controllerName = "capv-test-extension-manager" + + // flags. + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + profilerAddress string + enableContentionProfiling bool + syncPeriod time.Duration + restConfigQPS float32 + restConfigBurst int + webhookPort int + webhookCertDir string + webhookCertName string + webhookKeyName string + healthAddr string + tlsOptions = flags.TLSOptions{} + diagnosticsOptions = flags.DiagnosticsOptions{} + logOptions = logs.NewOptions() +) + +func init() { + // Adds to the scheme all the API types we used by the test extension. + _ = clientgoscheme.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + + _ = clusterv1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) + + _ = infrav1.AddToScheme(scheme) + _ = vmwarev1.AddToScheme(scheme) + + // Register the RuntimeHook types into the catalog. + _ = runtimehooksv1.AddToCatalog(catalog) +} + +// InitFlags initializes the flags. +func InitFlags(fs *pflag.FlagSet) { + // Initialize logs flags using Kubernetes component-base machinery. + // NOTE: it is not mandatory to use Kubernetes component-base machinery in custom RuntimeExtension, but it is + // recommended because it helps in ensuring consistency across different components in the cluster. + logsv1.AddFlags(logOptions, fs) + + fs.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") + + fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)") + + fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)") + + fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)") + + fs.StringVar(&profilerAddress, "profiler-address", "", + "Bind address to expose the pprof profiler (e.g. localhost:6060)") + + fs.BoolVar(&enableContentionProfiling, "contention-profiling", false, + "Enable block profiling") + + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)") + + fs.Float32Var(&restConfigQPS, "kube-api-qps", 20, + "Maximum queries per second from the controller client to the Kubernetes API server. Defaults to 20") + + fs.IntVar(&restConfigBurst, "kube-api-burst", 30, + "Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server. 
Default 30") + + fs.IntVar(&webhookPort, "webhook-port", 9443, + "Webhook Server port") + + fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", + "Webhook cert dir.") + + fs.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", + "Webhook cert name.") + + fs.StringVar(&webhookKeyName, "webhook-key-name", "tls.key", + "Webhook key name.") + + fs.StringVar(&healthAddr, "health-addr", ":9440", + "The address the health endpoint binds to.") + + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) + flags.AddTLSOptions(fs, &tlsOptions) + + // Add test-extension specific flags + // NOTE: it is not mandatory to use the same flag names in all RuntimeExtension, but it is recommended when + // addressing common concerns like profiler-address, webhook-port, webhook-cert-dir etc. because it helps in ensuring + // consistency across different components in the cluster. +} + +// Add RBAC for the authorized diagnostics endpoint. +// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create + +func main() { + // Initialize and parse command line flags. + InitFlags(pflag.CommandLine) + pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Set log level 2 as default. + if err := pflag.CommandLine.Set("v", "2"); err != nil { + setupLog.Error(err, "Failed to set default log level") + os.Exit(1) + } + pflag.Parse() + + // Validates logs flags using Kubernetes component-base machinery and apply them + // so klog will automatically use the right logger. + // NOTE: klog is the log of choice of component-base machinery. + if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { + setupLog.Error(err, "Unable to start extension") + os.Exit(1) + } + + // Add the klog logger in the context. 
+ // NOTE: it is not mandatory to use contextual logging in custom RuntimeExtension, but it is recommended + // because it allows to use a log stored in the context across the entire chain of calls (without + // requiring an addition log parameter in all the functions). + ctrl.SetLogger(klog.Background()) + + restConfig := ctrl.GetConfigOrDie() + restConfig.QPS = restConfigQPS + restConfig.Burst = restConfigBurst + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) + + tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) + if err != nil { + setupLog.Error(err, "Unable to add TLS settings to the webhook server") + os.Exit(1) + } + + diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions) + + if enableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + + // Create an HTTP server for serving Runtime Extensions. + runtimeExtensionWebhookServer, err := server.New(server.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + KeyName: webhookKeyName, + TLSOpts: tlsOptionOverrides, + Catalog: catalog, + }) + if err != nil { + setupLog.Error(err, "Error creating runtime extension webhook server") + os.Exit(1) + } + + ctrlOptions := ctrl.Options{ + Scheme: scheme, + LeaderElection: enableLeaderElection, + LeaderElectionID: "controller-leader-election-capv-test-extension", + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + HealthProbeBindAddress: healthAddr, + PprofBindAddress: profilerAddress, + Metrics: diagnosticsOpts, + Cache: cache.Options{ + SyncPeriod: &syncPeriod, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + // Use the cache for all Unstructured get/list calls. 
+ Unstructured: true, + }, + }, + WebhookServer: runtimeExtensionWebhookServer, + } + + // Start the manager + mgr, err := ctrl.NewManager(restConfig, ctrlOptions) + if err != nil { + setupLog.Error(err, "Unable to start manager") + os.Exit(1) + } + + // Set up a context listening for SIGINT. + ctx := ctrl.SetupSignalHandler() + + // Setup Runtime Extensions. + setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer) + setupLifecycleHookHandlers(mgr, runtimeExtensionWebhookServer) + + // Setup checks, indexes, reconcilers and webhooks. + setupChecks(mgr) + setupIndexes(ctx, mgr) + setupReconcilers(ctx, mgr) + setupWebhooks(mgr) + + setupLog.Info("Starting manager", "version", version.Get().String()) + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "Problem running manager") + os.Exit(1) + } +} + +// setupTopologyMutationHookHandlers sets up Topology Mutation Hooks (Runtime Patches). +func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Server) { + // Create the ExtensionHandlers for the Topology Mutation Hooks. + // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important + // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. 
+ topologyMutationExtensionHandlers := topologymutation.NewExtensionHandlers(scheme) + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.GeneratePatches, + Name: "generate-patches", + HandlerFunc: topologyMutationExtensionHandlers.GeneratePatches, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.ValidateTopology, + Name: "validate-topology", + HandlerFunc: topologyMutationExtensionHandlers.ValidateTopology, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.DiscoverVariables, + Name: "discover-variables", + HandlerFunc: topologyMutationExtensionHandlers.DiscoverVariables, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } +} + +// setupLifecycleHookHandlers sets up Lifecycle Hooks. +func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer *server.Server) { + // Create the ExtensionHandlers for the lifecycle hooks + // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important + // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. 
+ lifecycleExtensionHandlers := lifecycle.NewExtensionHandlers(mgr.GetClient()) + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterCreate, + Name: "before-cluster-create", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterCreate, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterControlPlaneInitialized, + Name: "after-control-plane-initialized", + HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneInitialized, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterUpgrade, + Name: "before-cluster-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterControlPlaneUpgrade, + Name: "after-control-plane-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterClusterUpgrade, + Name: "after-cluster-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoAfterClusterUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterDelete, + Name: "before-cluster-delete", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterDelete, + }); err != nil { + setupLog.Error(err, "Error adding handler") + 
os.Exit(1) + } +} + +func setupChecks(mgr ctrl.Manager) { + if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "Unable to create ready check") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "Unable to create health check") + os.Exit(1) + } +} + +func setupIndexes(_ context.Context, _ ctrl.Manager) { +} + +func setupReconcilers(_ context.Context, _ ctrl.Manager) { +} + +func setupWebhooks(_ ctrl.Manager) { +} diff --git a/test/extension/tilt-provider.yaml b/test/extension/tilt-provider.yaml new file mode 100644 index 0000000000..8723800266 --- /dev/null +++ b/test/extension/tilt-provider.yaml @@ -0,0 +1,11 @@ +--- +- name: capv-test-extension + config: + version: v1.11.99 + image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-test-extension + live_reload_deps: + - main.go + - handlers + label: CAPV_EXTENSION + additional_resources: + - config/tilt/extensionconfig.yaml diff --git a/test/go.mod b/test/go.mod index 613f90af8c..2656ef481a 100644 --- a/test/go.mod +++ b/test/go.mod @@ -19,6 +19,7 @@ require ( ) require ( + github.com/blang/semver/v4 v4.0.0 github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.33.1 @@ -27,6 +28,7 @@ require ( golang.org/x/crypto v0.25.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.30.2 + k8s.io/apiextensions-apiserver v0.30.2 k8s.io/apimachinery v0.30.2 k8s.io/client-go v0.30.2 k8s.io/component-base v0.30.2 @@ -54,7 +56,6 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect - github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect @@ -73,6 +74,7 @@ 
require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect @@ -165,7 +167,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.30.2 // indirect k8s.io/apiserver v0.30.2 // indirect k8s.io/cluster-bootstrap v0.30.2 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect diff --git a/test/infrastructure/net-operator/tilt-provider.yaml b/test/infrastructure/net-operator/tilt-provider.yaml index 5a72dea94c..05afa8f8b5 100644 --- a/test/infrastructure/net-operator/tilt-provider.yaml +++ b/test/infrastructure/net-operator/tilt-provider.yaml @@ -1,7 +1,7 @@ --- - name: net-operator config: - version: v1.10.99 + version: v1.11.99 image: gcr.io/k8s-staging-capi-vsphere/cluster-api-net-operator live_reload_deps: - main.go diff --git a/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml b/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml index cd7ae12c01..74a0879c60 100644 --- a/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml +++ b/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml @@ -8,4 +8,4 @@ spec: spec: containers: - name: manager - imagePullPolicy: IfNotPresent + imagePullPolicy: Always diff --git a/test/infrastructure/vcsim/tilt-provider.yaml b/test/infrastructure/vcsim/tilt-provider.yaml index 1726426c73..edc92ca8b3 100644 --- a/test/infrastructure/vcsim/tilt-provider.yaml +++ b/test/infrastructure/vcsim/tilt-provider.yaml @@ -1,7 +1,7 @@ --- - name: vcsim config: - version: v1.10.99 + version: v1.11.99 image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vcsim-controller live_reload_deps: - main.go 
diff --git a/tilt-provider.yaml b/tilt-provider.yaml index 4a5a9715b6..b8ec74a2b9 100644 --- a/tilt-provider.yaml +++ b/tilt-provider.yaml @@ -1,7 +1,7 @@ --- - name: vsphere config: - version: v1.10.99 + version: v1.11.99 image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller live_reload_deps: - main.go @@ -15,7 +15,7 @@ label: CAPV - name: vsphere-supervisor config: - version: v1.10.99 + version: v1.11.99 image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller live_reload_deps: - main.go