refactor: change e2e tests so it's easier to use different providers
This change refactors the existing e2e tests so that there is a new spec
that can be re-used with different template files to enable us to test
different provider combinations.
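
For illustration, the re-usable spec can be driven with different template files roughly along these lines (a minimal sketch only; the helper and struct names below are assumptions for illustration, not the repository's actual test API):

//go:build e2e

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
)

// createClusterSpecInput carries the per-provider parameters; the field set
// here is an illustrative assumption, not the repository's actual struct.
type createClusterSpecInput struct {
	TemplatePath string // cluster template applied for this provider combination
	ClusterName  string // name given to the workload cluster
}

// runCreateClusterSpec stands in for the shared spec body that applies the
// template, waits for the cluster to come up, and tears it down afterwards.
func runCreateClusterSpec(ctx context.Context, input createClusterSpecInput) {
	// apply input.TemplatePath, wait for readiness, clean up
}

// The same spec re-used with a different template file per provider combination.
var _ = Describe("Create and delete CAPI cluster [Docker Kubeadm]", func() {
	It("creates a cluster from the docker-kubeadm template", func(ctx SpecContext) {
		runCreateClusterSpec(ctx, createClusterSpecInput{
			TemplatePath: "data/cluster-templates/docker-kubeadm.yaml",
			ClusterName:  "docker-kubeadm",
		})
	})
})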

Additionally the tests have been changed to use an in-cluster Git server
(gitea) so that we can create new repos as part of the test and remove
the reliance on the samples repo.
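
For example, a test could create a throwaway repo on the in-cluster Gitea instance through its REST API, roughly as follows (a sketch; the service URL is an assumed example, and the credentials mirror GITEA_USER_NAME/GITEA_USER_PWD from test/e2e/config/operator.yaml):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// createGiteaRepo creates a new repository for the test via Gitea's REST API.
// Error handling is kept deliberately terse for the sketch.
func createGiteaRepo(baseURL, user, password, repoName string) error {
	payload := []byte(fmt.Sprintf(`{"name": %q, "auto_init": true}`, repoName))

	req, err := http.NewRequest(http.MethodPost, baseURL+"/api/v1/user/repos", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	req.SetBasicAuth(user, password)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("creating repo %q: unexpected status %s", repoName, resp.Status)
	}
	return nil
}

func main() {
	// Example invocation against an assumed in-cluster service address.
	if err := createGiteaRepo("http://gitea-http.default.svc:3000", "gitea_admin", "password", "e2e-clusters"); err != nil {
		panic(err)
	}
}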

Signed-off-by: Richard Case <richard.case@suse.com>
richardcase committed Sep 25, 2023
1 parent 3daef42 commit 40c214a
Showing 29 changed files with 1,723 additions and 165 deletions.
3 changes: 3 additions & 0 deletions .dockerignore
@@ -37,3 +37,6 @@ cmd/clusterctl/clusterctl/**
**/*.tmp
**/.DS_Store
**/*.swp

# Ignore go.work files
go.work*
25 changes: 22 additions & 3 deletions Makefile
@@ -32,6 +32,9 @@ TEST_DIR := test
TOOLS_DIR := hack/tools
TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR))

$(TOOLS_BIN_DIR):
mkdir -p $@

export PATH := $(abspath $(TOOLS_BIN_DIR)):$(PATH)

# Set --output-base for conversion-gen if we are not within GOPATH
@@ -110,6 +113,10 @@ HELM_VER := v3.8.1
HELM_BIN := helm
HELM := $(TOOLS_BIN_DIR)/$(HELM_BIN)-$(HELM_VER)

CLUSTERCTL_VER := v1.4.6
CLUSTERCTL_BIN := clusterctl
CLUSTERCTL := $(TOOLS_BIN_DIR)/$(CLUSTERCTL_BIN)-$(CLUSTERCTL_VER)

GOLANGCI_LINT_VER := v1.53.3
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN))
@@ -419,6 +426,10 @@ $(HELM): ## Put helm into tools folder.
ln -sf $(HELM) $(TOOLS_BIN_DIR)/$(HELM_BIN)
rm -f $(TOOLS_BIN_DIR)/get_helm.sh

$(CLUSTERCTL): $(TOOLS_BIN_DIR) ## Download and install clusterctl
curl --retry $(CURL_RETRIES) -fsSL -o $(CLUSTERCTL) https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CLUSTERCTL_VER)/clusterctl-linux-amd64
chmod +x $(CLUSTERCTL)

## --------------------------------------
## Release
## --------------------------------------
@@ -452,9 +463,7 @@ release-chart: $(HELM) $(NOTES) build-chart verify-gen
$(HELM) package $(CHART_RELEASE_DIR) --app-version=$(HELM_CHART_TAG) --version=$(HELM_CHART_TAG) --destination=$(CHART_PACKAGE_DIR)

.PHONY: test-e2e
test-e2e: $(GINKGO) $(HELM) kubectl ## Run the end-to-end tests
TAG=v0.0.1 $(MAKE) docker-build
RELEASE_TAG=v0.0.1 CONTROLLER_IMG=$(MANIFEST_IMG) $(MAKE) build-chart
test-e2e: $(GINKGO) $(HELM) $(CLUSTERCTL) kubectl e2e-image ## Run the end-to-end tests
RANCHER_HOSTNAME=$(RANCHER_HOSTNAME) \
$(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \
-poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" \
@@ -463,11 +472,21 @@ test-e2e: $(GINKGO) $(HELM) kubectl ## Run the end-to-end tests
-e2e.artifacts-folder="$(ARTIFACTS)" \
-e2e.config="$(E2E_CONF_FILE)" \
-e2e.helm-binary-path=$(HELM) \
-e2e.clusterctl-binary-path=$(CLUSTERCTL) \
-e2e.chart-path=$(ROOT_DIR)/$(CHART_RELEASE_DIR) \
-e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \
-e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) \
-e2e.isolated-mode=$(ISOLATED_MODE)

.PHONY: e2e-image
e2e-image: ## Build the image for e2e tests
TAG=v0.0.1 $(MAKE) docker-build
RELEASE_TAG=v0.0.1 CONTROLLER_IMG=$(MANIFEST_IMG) $(MAKE) build-chart

.PHONY: e2e-compile
e2e-compile: ## Test e2e compilation
go test -c -o /dev/null -tags=e2e ./test/e2e

## --------------------------------------
## Documentation
## --------------------------------------
10 changes: 9 additions & 1 deletion test/e2e/config/operator.yaml
@@ -8,10 +8,12 @@ images:
intervals:
  default/wait-controllers: ["3m", "10s"]
  default/wait-rancher: ["15m", "30s"]
  default/wait-gitea: ["3m", "10s"]
  default/wait-consistently: ["30s", "5s"]
  default/wait-getservice: ["60s", "5s"]

variables:
  RANCHER_VERSION: "v2.7.5"
  RANCHER_VERSION: "v2.7.6"
  RANCHER_HOSTNAME: "localhost"
  RANCHER_FEATURES: "embedded-cluster-api=false"
  RANCHER_PATH: "rancher-stable/rancher"
@@ -27,3 +29,9 @@ variables:
  NGROK_PATH: "ngrok/kubernetes-ingress-controller"
  NGROK_API_KEY: ""
  NGROK_AUTHTOKEN: ""
  GITEA_REPO_NAME: "gitea-charts"
  GITEA_REPO_URL: "https://dl.gitea.com/charts/"
  GITEA_CHART_NAME: "gitea"
  GITEA_CHART_VERSION: "9.4.0"
  GITEA_USER_NAME: "gitea_admin"
  GITEA_USER_PWD: "password"
283 changes: 283 additions & 0 deletions test/e2e/data/cluster-templates/docker-kubeadm.yaml
@@ -0,0 +1,283 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerCluster
metadata:
  name: ${CLUSTER_NAME}
  annotations:
    "helm.sh/resource-policy": keep
spec:
  failureDomains:
    fd1:
      controlPlane: true
    fd2:
      controlPlane: true
    fd3:
      controlPlane: true
    fd4:
      controlPlane: false
    fd5:
      controlPlane: false
    fd6:
      controlPlane: false
    fd7:
      controlPlane: false
    fd8:
      controlPlane: false
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    cni: ${CLUSTER_NAME}-crs-0
  name: ${CLUSTER_NAME}
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
      - 192.168.0.0/16
    serviceDomain: cluster.local
    services:
      cidrBlocks:
      - 10.128.0.0/12
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerCluster
    name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-control-plane
spec:
  template:
    spec:
      extraMounts:
      - containerPath: /var/run/docker.sock
        hostPath: /var/run/docker.sock
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
  annotations:
    "helm.sh/resource-policy": keep
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        certSANs:
        - localhost
        - 127.0.0.1
        - 0.0.0.0
        - host.docker.internal
      controllerManager:
        extraArgs:
          enable-hostpath-provisioner: "true"
    initConfiguration:
      nodeRegistration:
        criSocket: unix:///var/run/containerd/containerd.sock
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
    joinConfiguration:
      nodeRegistration:
        criSocket: unix:///var/run/containerd/containerd.sock
        kubeletExtraArgs:
          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: DockerMachineTemplate
      name: ${CLUSTER_NAME}-control-plane
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  version: ${KUBERNETES_VERSION}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
spec:
  template:
    spec:
      extraMounts:
      - containerPath: /var/run/docker.sock
        hostPath: /var/run/docker.sock
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: ${CLUSTER_NAME}-md-0
  annotations:
    "helm.sh/resource-policy": keep
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          criSocket: unix:///var/run/containerd/containerd.sock
          kubeletExtraArgs:
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: ${CLUSTER_NAME}-md-0
  annotations:
    "helm.sh/resource-policy": keep
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels: null
  template:
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: ${CLUSTER_NAME}-md-0
      clusterName: ${CLUSTER_NAME}
      failureDomain: fd4
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: DockerMachineTemplate
        name: ${CLUSTER_NAME}-md-0
      version: ${KUBERNETES_VERSION}
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: ${CLUSTER_NAME}-crs-0
spec:
  clusterSelector:
    matchLabels:
      cni: ${CLUSTER_NAME}-crs-0
  resources:
  - kind: ConfigMap
    name: cni-${CLUSTER_NAME}-crs-0
  strategy: ApplyOnce
---
apiVersion: v1
data:
  kindnet.yaml: |
    # kindnetd networking manifest
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: kindnet
    rules:
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - list
      - watch
      - patch
    - apiGroups:
      - ""
      resources:
      - configmaps
      verbs:
      - get
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: kindnet
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: kindnet
    subjects:
    - kind: ServiceAccount
      name: kindnet
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: kindnet
      namespace: kube-system
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kindnet
      namespace: kube-system
      labels:
        tier: node
        app: kindnet
        k8s-app: kindnet
    spec:
      selector:
        matchLabels:
          app: kindnet
      template:
        metadata:
          labels:
            tier: node
            app: kindnet
            k8s-app: kindnet
        spec:
          hostNetwork: true
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: kindnet
          containers:
          - name: kindnet-cni
            image: kindest/kindnetd:v20230330-48f316cd
            env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_SUBNET
              value: '192.168.0.0/16'
            volumeMounts:
            - name: cni-cfg
              mountPath: /etc/cni/net.d
            - name: xtables-lock
              mountPath: /run/xtables.lock
              readOnly: false
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                add: ["NET_RAW", "NET_ADMIN"]
          volumes:
          - name: cni-bin
            hostPath:
              path: /opt/cni/bin
              type: DirectoryOrCreate
          - name: cni-cfg
            hostPath:
              path: /etc/cni/net.d
              type: DirectoryOrCreate
          - name: xtables-lock
            hostPath:
              path: /run/xtables.lock
              type: FileOrCreate
          - name: lib-modules
            hostPath:
              path: /lib/modules
kind: ConfigMap
metadata:
  name: cni-${CLUSTER_NAME}-crs-0
