Skip to content

Commit

Permalink
add simple e2e tests for replication
Browse files Browse the repository at this point in the history
Signed-off-by: Ryotaro Banno <ryotaro.banno@gmail.com>
  • Loading branch information
ushitora-anqou committed Aug 29, 2024
1 parent bfe05cf commit 5dc2dfb
Show file tree
Hide file tree
Showing 13 changed files with 378 additions and 85 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,5 @@ Dockerfile.cross
*~

test/e2e/testdata/persistentvolumes.yaml
test/e2e/testdata/values-mantle-primary.yaml
include/
110 changes: 78 additions & 32 deletions test/e2e/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -36,21 +36,49 @@ setup:

.PHONY: test
test:
$(MAKE) launch-cluster MINIKUBE_PROFILE=$(MINIKUBE_PROFILE_PRIMARY)
$(MINIKUBE) profile $(MINIKUBE_PROFILE_PRIMARY)
$(MAKE) launch-minikube MINIKUBE_PROFILE=$(MINIKUBE_PROFILE_PRIMARY)
$(MAKE) install-rook-ceph-operator
$(MAKE) install-rook-ceph-cluster1
$(MAKE) install-rook-ceph-cluster2
$(MAKE) install-mantle-cluster-wide
$(MAKE) install-mantle \
NAMESPACE=$(CEPH_CLUSTER1_NAMESPACE) \
HELM_RELEASE=mantle \
VALUES_YAML=testdata/values-mantle1.yaml
$(MAKE) install-mantle \
NAMESPACE=$(CEPH_CLUSTER2_NAMESPACE) \
HELM_RELEASE=mantle2 \
VALUES_YAML=testdata/values-mantle2.yaml
$(MAKE) do_test

.PHONY: test-multiple-k8s-clusters
test-multiple-k8s-clusters:
$(MAKE) launch-cluster MINIKUBE_PROFILE=$(MINIKUBE_PROFILE_PRIMARY)
$(MAKE) launch-cluster MINIKUBE_PROFILE=$(MINIKUBE_PROFILE_SECONDARY)
# set up a k8s cluster for secondary mantle
$(MAKE) launch-minikube MINIKUBE_PROFILE=$(MINIKUBE_PROFILE_SECONDARY)
$(MAKE) install-rook-ceph-operator
$(MAKE) install-rook-ceph-cluster1
$(MAKE) install-mantle-cluster-wide
$(MAKE) install-mantle \
NAMESPACE=$(CEPH_CLUSTER1_NAMESPACE) \
HELM_RELEASE=mantle \
VALUES_YAML=testdata/values-mantle-secondary.yaml
$(KUBECTL) apply -f testdata/secondary-mantle-service.yaml
# set up a k8s cluster for primary mantle
$(MAKE) launch-minikube MINIKUBE_PROFILE=$(MINIKUBE_PROFILE_PRIMARY)
$(MAKE) install-rook-ceph-operator
$(MAKE) install-rook-ceph-cluster1
$(MAKE) install-mantle-cluster-wide
sed \
-e "s%{ENDPOINT}%$$($(MINIKUBE) service list -p $(MINIKUBE_PROFILE_SECONDARY) -o json | jq -r '.[].URLs | select(. | length > 0)[]' | head -1 | sed -r 's/^http:\/\///')%" \
testdata/values-mantle-primary-template.yaml \
> testdata/values-mantle-primary.yaml
$(MAKE) install-mantle \
NAMESPACE=$(CEPH_CLUSTER1_NAMESPACE) \
HELM_RELEASE=mantle \
VALUES_YAML=testdata/values-mantle-primary.yaml
# start testing
$(MINIKUBE) profile $(MINIKUBE_PROFILE_PRIMARY)
env \
MINIKUBE=$(MINIKUBE) \
MINIKUBE_HOME=$(MINIKUBE_HOME) \
MINIKUBE_PROFILE_PRIMARY=$(MINIKUBE_PROFILE_PRIMARY) \
MINIKUBE_PROFILE_SECONDARY=$(MINIKUBE_PROFILE_SECONDARY) \
./test-multiple-k8s-clusters.sh
$(MAKE) do-test-multik8s

.PHONY: clean
clean:
Expand All @@ -75,9 +103,9 @@ $(HELM): | $(BINDIR)
$(CURL) https://get.helm.sh/helm-v$(HELM_VERSION)-linux-amd64.tar.gz \
| tar xvz -C $(BINDIR) --strip-components 1 linux-amd64/helm

.PHONY: launch-cluster
launch-cluster: MINIKUBE_PROFILE=
launch-cluster:
.PHONY: launch-minikube
launch-minikube: MINIKUBE_PROFILE=
launch-minikube:
# TODO: Is there any better way to verify whether k8s cluster is available or not?
if $(MINIKUBE) profile $(MINIKUBE_PROFILE) |& grep "not found" > /dev/null; then \
$(MINIKUBE) start \
Expand All @@ -92,8 +120,14 @@ launch-cluster:
fi
$(MINIKUBE) profile $(MINIKUBE_PROFILE)
$(MAKE) image-build
$(MAKE) launch-rook-ceph
$(MAKE) setup-components
$(MAKE) create-loop-dev
sed \
-e "s%{LOOP_DEV}%$(LOOP_DEV)%" \
-e "s%{LOOP_DEV2}%$(LOOP_DEV2)%" \
-e "s%{NODE_NAME}%$(NODE_NAME)%" \
testdata/persistentvolumes-template.yaml \
> testdata/persistentvolumes.yaml
$(KUBECTL) apply -f testdata/persistentvolumes.yaml

.PHONY: create-loop-dev
create-loop-dev:
Expand Down Expand Up @@ -122,27 +156,26 @@ wait-deploy-ready:
exit 1; \
fi

.PHONY: launch-rook-ceph
launch-rook-ceph: create-loop-dev
.PHONY: install-rook-ceph-operator
install-rook-ceph-operator:
$(HELM) upgrade --install --version $(ROOK_CHART_VERSION) --repo https://charts.rook.io/release \
--create-namespace --namespace $(CEPH_CLUSTER1_NAMESPACE) -f testdata/values.yaml --wait \
rook-ceph rook-ceph
sed \
-e "s%{LOOP_DEV}%$(LOOP_DEV)%" \
-e "s%{LOOP_DEV2}%$(LOOP_DEV2)%" \
-e "s%{NODE_NAME}%$(NODE_NAME)%" \
testdata/persistentvolumes-template.yaml \
> testdata/persistentvolumes.yaml
$(KUBECTL) apply -f testdata/persistentvolumes.yaml
$(MAKE) wait-deploy-ready NS=$(CEPH_CLUSTER1_NAMESPACE) DEPLOY=rook-ceph-operator

.PHONY: install-rook-ceph-cluster1
install-rook-ceph-cluster1:
$(HELM) upgrade --install --version $(ROOK_CHART_VERSION) --repo https://charts.rook.io/release \
--namespace $(CEPH_CLUSTER1_NAMESPACE) -f testdata/values-cluster.yaml \
--wait rook-ceph-cluster rook-ceph-cluster
$(MAKE) wait-deploy-ready NS=$(CEPH_CLUSTER1_NAMESPACE) DEPLOY=rook-ceph-osd-0

.PHONY: install-rook-ceph-cluster2
install-rook-ceph-cluster2:
$(HELM) upgrade --install --version $(ROOK_CHART_VERSION) --repo https://charts.rook.io/release \
--create-namespace --namespace $(CEPH_CLUSTER2_NAMESPACE) -f testdata/values-cluster.yaml \
--set cephClusterSpec.dataDirHostPath=/var/lib/rook2 \
--wait rook-ceph-cluster2 rook-ceph-cluster
$(MAKE) wait-deploy-ready NS=$(CEPH_CLUSTER1_NAMESPACE) DEPLOY=rook-ceph-operator
$(MAKE) wait-deploy-ready NS=$(CEPH_CLUSTER1_NAMESPACE) DEPLOY=rook-ceph-osd-0
$(MAKE) wait-deploy-ready NS=$(CEPH_CLUSTER2_NAMESPACE) DEPLOY=rook-ceph-osd-0

.PHONY: image-build
Expand All @@ -151,13 +184,17 @@ image-build:
$(MAKE) -C ../.. docker-build
$(MINIKUBE) ssh -- docker images

.PHONY: setup-components
setup-components:
.PHONY: install-mantle-cluster-wide
install-mantle-cluster-wide:
$(HELM) upgrade --install mantle-cluster-wide ../../charts/mantle-cluster-wide/ --wait
$(HELM) upgrade --install --namespace=$(CEPH_CLUSTER1_NAMESPACE) mantle ../../charts/mantle/ --wait -f testdata/values-mantle.yaml
$(HELM) upgrade --install --namespace=$(CEPH_CLUSTER2_NAMESPACE) mantle2 ../../charts/mantle/ --wait
$(KUBECTL) rollout restart -n $(CEPH_CLUSTER1_NAMESPACE) deploy/mantle-controller
$(KUBECTL) rollout restart -n $(CEPH_CLUSTER2_NAMESPACE) deploy/mantle2-controller

# install-mantle — install (or upgrade) one mantle Helm release and restart
# its controller Deployment so it picks up freshly built images.
# The three target-specific variables below default to empty on purpose:
# they document the parameters every caller must pass:
#   NAMESPACE    — namespace the release is installed into
#   HELM_RELEASE — Helm release name; the controller Deployment is
#                  deploy/$(HELM_RELEASE)-controller
#   VALUES_YAML  — values file handed to helm via -f
.PHONY: install-mantle
install-mantle: NAMESPACE=
install-mantle: HELM_RELEASE=
install-mantle: VALUES_YAML=
install-mantle:
$(HELM) upgrade --install --namespace=$(NAMESPACE) $(HELM_RELEASE) ../../charts/mantle/ --wait -f $(VALUES_YAML)
$(KUBECTL) rollout restart -n $(NAMESPACE) deploy/$(HELM_RELEASE)-controller

.PHONY: do_test
do_test: $(GINKGO)
Expand All @@ -166,3 +203,12 @@ do_test: $(GINKGO)
E2ETEST=1 \
KUBECTL=$(KUBECTL) \
$(GINKGO) --fail-fast -v $(GINKGO_FLAGS) singlek8s

# do-test-multik8s — run the Ginkgo "multik8s" specs against the two minikube
# clusters. Each KUBECTL_* variable is a full command prefix that routes
# kubectl through minikube's bundled kubectl for the given profile, so the
# test code can address the primary and secondary clusters independently.
# E2ETEST=1 lifts the skip guard in TestMtest; PATH is forwarded so the specs
# can find the tools installed under the repo-local bin directory.
.PHONY: do-test-multik8s
do-test-multik8s: $(GINKGO)
env \
PATH=${PATH} \
E2ETEST=1 \
KUBECTL_PRIMARY="$(MINIKUBE) -p $(MINIKUBE_PROFILE_PRIMARY) kubectl -- " \
KUBECTL_SECONDARY="$(MINIKUBE) -p $(MINIKUBE_PROFILE_SECONDARY) kubectl -- " \
$(GINKGO) --fail-fast -v $(GINKGO_FLAGS) multik8s
91 changes: 91 additions & 0 deletions test/e2e/multik8s/suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
package multik8s

import (
_ "embed"
"errors"
"os"
"testing"
"time"

"github.com/cybozu-go/mantle/test/util"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/meta"

mantlev1 "github.com/cybozu-go/mantle/api/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestMtest is the Go test entry point for the multi-k8s e2e suite.
// It is skipped unless the E2ETEST environment variable is non-empty,
// because the suite expects clusters prepared by the e2e Makefile.
func TestMtest(t *testing.T) {
	flag := os.Getenv("E2ETEST")
	if flag == "" {
		t.Skip("Run under e2e/")
	}

	RegisterFailHandler(Fail)

	// Polling/timeout defaults applied to every Eventually in this suite.
	SetDefaultEventuallyTimeout(3 * time.Minute)
	SetDefaultEventuallyPollingInterval(time.Second)

	RunSpecs(t, "rbd backup system test with multiple k8s clusters")
}

// Top-level spec registration: first wait for the mantle controller to
// become ready, then run the replication scenarios.
var _ = Describe("Mantle", func() {
Context("wait controller to be ready", waitControllerToBeReady)
Context("replication test", replicationTestSuite)
})

// waitControllerToBeReady registers a spec that blocks until the
// mantle-controller Deployment in the rook-ceph namespace reports ready.
func waitControllerToBeReady() {
It("wait for mantle-controller to be ready", func() {
Eventually(func() error {
return checkDeploymentReady(primaryK8sCluster, "rook-ceph", "mantle-controller")
}).Should(Succeed())

// NOTE(review): this second check is byte-identical to the first — both
// poll primaryK8sCluster. In a multi-k8s suite the second block is
// presumably meant to wait on the secondary cluster's controller;
// confirm the intended cluster identifier and fix.
Eventually(func() error {
return checkDeploymentReady(primaryK8sCluster, "rook-ceph", "mantle-controller")
}).Should(Succeed())
})
}

// replicationTestSuite registers the replication specs: it provisions a
// namespace, RBD pool, StorageClass and PVC on the primary cluster, creates
// a MantleBackup for the PVC, and then waits for the backup's
// SyncedToRemote condition to become True.
func replicationTestSuite() {
	Describe("replication test", func() {
		It("should eventually set SyncedToRemote of a MantleBackup to True after it is created", func() {
			ns := util.GetUniqueName("ns-")
			pvc := util.GetUniqueName("pvc-")
			backup := util.GetUniqueName("mb-")
			sc := util.GetUniqueName("sc-")
			pool := util.GetUniqueName("pool-")

			// Retry each setup step until the apiserver accepts it.
			ensure := func(step func() error) {
				Eventually(step).Should(Succeed())
			}

			By("setting up the environment")
			ensure(func() error {
				return createNamespace(primaryK8sCluster, ns)
			})
			ensure(func() error {
				return applyRBDPoolAndSCTemplate(primaryK8sCluster, cephClusterNamespace, pool, sc)
			})
			ensure(func() error {
				return applyPVCTemplate(primaryK8sCluster, ns, pvc, sc)
			})

			By("creating a MantleBackup object")
			ensure(func() error {
				return applyMantleBackupTemplate(primaryK8sCluster, ns, pvc, backup)
			})

			By("checking MantleBackup's SyncedToRemote status")
			Eventually(func() error {
				mb, err := getMB(primaryK8sCluster, ns, backup)
				if err != nil {
					return err
				}
				cond := meta.FindStatusCondition(mb.Status.Conditions, mantlev1.BackupConditionSyncedToRemote)
				if cond == nil {
					return errors.New("couldn't find condition SyncedToRemote")
				}
				if cond.Status != metav1.ConditionTrue {
					return errors.New("status of SyncedToRemote condition is not True")
				}
				return nil
			}).Should(Succeed())
		})
	})
}
13 changes: 13 additions & 0 deletions test/e2e/multik8s/testdata/mantlebackup-template.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Template for a MantleBackup object; the %s placeholders are filled in
# positionally (fmt.Sprintf style) in this order:
#   1. app.kubernetes.io/instance label
#   2. metadata.name
#   3. metadata.namespace
#   4. spec.pvc (name of the PVC to back up)
# NOTE(review): order inferred from placeholder positions — confirm against
# the Go caller (applyMantleBackupTemplate).
apiVersion: mantle.cybozu.io/v1
kind: MantleBackup
metadata:
labels:
app.kubernetes.io/name: mantlebackup
app.kubernetes.io/instance: %s
app.kubernetes.io/part-of: mantle
app.kubernetes.io/managed-by: kustomize
app.kubernetes.io/created-by: mantle
name: %s
namespace: %s
spec:
pvc: %s
12 changes: 12 additions & 0 deletions test/e2e/multik8s/testdata/pvc-template.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Template for a 1Gi RWO PersistentVolumeClaim; the %s placeholders are
# filled in positionally (fmt.Sprintf style): 1. metadata.name,
# 2. storageClassName.
# NOTE(review): order inferred from placeholder positions — confirm against
# the Go caller (applyPVCTemplate).
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: %s
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: %s
30 changes: 30 additions & 0 deletions test/e2e/multik8s/testdata/rbd-pool-sc-template.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Template for a CephBlockPool plus a matching RBD StorageClass; the %s
# placeholders are filled in positionally (fmt.Sprintf style) in this order:
#   1. pool name   2. pool namespace   3. StorageClass name
#   4. clusterID   5. pool (should match 1)
#   6-8. the three CSI secret namespaces (should match 2)
# NOTE(review): order inferred from placeholder positions — confirm against
# the Go caller (applyRBDPoolAndSCTemplate).
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: %s
namespace: %s
spec:
failureDomain: osd
# size 1 with requireSafeReplicaSize disabled: unreplicated pool, only
# acceptable because this is a throwaway e2e test cluster.
replicated:
size: 1
requireSafeReplicaSize: false
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: %s
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: %s
pool: %s
imageFormat: "2"
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: %s
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: %s
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: %s
csi.storage.k8s.io/fstype: ext4
allowVolumeExpansion: true
reclaimPolicy: Delete
Loading

0 comments on commit 5dc2dfb

Please sign in to comment.