diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml
index 375e802d7981..97ef9ad80fa3 100644
--- a/.github/workflows/helm-lint.yaml
+++ b/.github/workflows/helm-lint.yaml
@@ -40,7 +40,7 @@ jobs:
           python-version: 3.9
 
       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.4.0
+        uses: helm/chart-testing-action@v2.6.0
 
       - name: Run chart-testing (lint)
         run: ct lint --charts=./deploy/charts/rook-ceph --validate-yaml=false --validate-maintainers=false
diff --git a/Documentation/Contributing/development-environment.md b/Documentation/Contributing/development-environment.md
index 383db60084cd..2c8b4ad176a0 100644
--- a/Documentation/Contributing/development-environment.md
+++ b/Documentation/Contributing/development-environment.md
@@ -87,3 +87,12 @@ docker tag "local/ceph-$(go env GOARCH)" 'rook/ceph:master'
 
 4) Create a Rook cluster in minikube, or if the Rook cluster is already
 configured, apply the new operator image by restarting the operator.
+
+
+## Creating a dev cluster
+
+To speed up development, you can use the script at `tests/scripts/create-dev-cluster.sh`.
+It quickly sets up a new minikube environment, applies the CRDs and the common file, and
+then creates the Rook cluster from the `cluster-test.yaml` manifest. Once the cluster is
+set up, the other `*-test.yaml` files in the `deploy/examples/` directory can be used to
+configure it further.
diff --git a/Documentation/Troubleshooting/disaster-recovery.md b/Documentation/Troubleshooting/disaster-recovery.md
index ee7ee42ad778..8f7728c111f2 100644
--- a/Documentation/Troubleshooting/disaster-recovery.md
+++ b/Documentation/Troubleshooting/disaster-recovery.md
@@ -63,7 +63,7 @@ the CRs to their prior state without even necessarily suffering cluster downtime
 the validating webhook in order to make changes.
 
     ```console
-    kubectl delete ValidatingWebhookConfiguration rook-ceph-webhook
+    kubectl -n rook-ceph delete ValidatingWebhookConfiguration rook-ceph-webhook
    ```
 
 4. Remove the owner references from all critical Rook resources that were referencing the `CephCluster` CR.
diff --git a/deploy/examples/cluster-on-pvc-minikube.yaml b/deploy/examples/cluster-on-pvc-minikube.yaml
new file mode 100644
index 000000000000..01bd3f049e6f
--- /dev/null
+++ b/deploy/examples/cluster-on-pvc-minikube.yaml
@@ -0,0 +1,158 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster on a single-node minikube cluster
+
+# This example expects a single-node minikube cluster with three extra disks: vdb, vdc and vdd. Please modify
+# it according to your environment. See the documentation for more details on the available storage settings.
+
+# For example, to create the cluster:
+#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+#   kubectl create -f cluster-on-pvc-minikube.yaml
+#################################################################################################################
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+---
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: local0-0
+spec:
+  storageClassName: local-storage
+  capacity:
+    storage: 10Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  # PV for mon must be a filesystem volume.
+ volumeMode: Filesystem + local: + # To use dm devices like logical volume, please replace `/dev/sdb` with their device names like `/dev/vg-name/lv-name`. + path: /dev/vdb + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - minikube +--- +kind: PersistentVolume +apiVersion: v1 +metadata: + name: local0-1 +spec: + storageClassName: local-storage + capacity: + storage: 20Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + # PV for mon must be a filesystem volume. + volumeMode: Block + local: + # To use dm devices like logical volume, please replace `/dev/sdb` with their device names like `/dev/vg-name/lv-name`. + path: /dev/vdc + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - minikube +--- +kind: PersistentVolume +apiVersion: v1 +metadata: + name: local0-2 +spec: + storageClassName: local-storage + capacity: + storage: 20Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + # PV for mon must be a filesystem volume. + volumeMode: Block + local: + # To use dm devices like logical volume, please replace `/dev/sdb` with their device names like `/dev/vg-name/lv-name`. + path: /dev/vdd + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - minikube +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: my-cluster + namespace: rook-ceph # namespace:cluster +spec: + dataDirHostPath: /var/lib/rook + mon: + count: 1 + allowMultiplePerNode: true + volumeClaimTemplate: + spec: + storageClassName: local-storage + resources: + requests: + storage: 10Gi + mgr: + count: 1 + modules: + - name: pg_autoscaler + enabled: true + dashboard: + enabled: true + ssl: false + crashCollector: + disable: false + cephVersion: + image: quay.io/ceph/ceph:v18 + allowUnsupported: false + skipUpgradeChecks: false + continueUpgradeAfterChecksEvenIfNotHealthy: false + storage: + storageClassDeviceSets: + - name: set1 + count: 2 + portable: false + tuneDeviceClass: true + tuneFastDeviceClass: false + encrypted: false + placement: + preparePlacement: + volumeClaimTemplates: + - metadata: + name: data + # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph + # annotations: + # crushDeviceClass: hybrid + spec: + resources: + requests: + storage: 20Gi + # IMPORTANT: Change the storage class depending on your environment + storageClassName: local-storage + volumeMode: Block + accessModes: + - ReadWriteOnce + # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement + onlyApplyOSDPlacement: false + priorityClassNames: + mon: system-node-critical + osd: system-node-critical + mgr: system-cluster-critical + disruptionManagement: + managePodBudgets: true + osdMaintenanceTimeout: 30 + pgHealthCheckTimeout: 0 diff --git a/go.mod b/go.mod index 44f949962656..36ea1922b0fb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/IBM/keyprotect-go-client v0.12.2 - github.com/aws/aws-sdk-go v1.46.1 + github.com/aws/aws-sdk-go v1.46.6 github.com/banzaicloud/k8s-objectmatcher v1.8.0 github.com/ceph/go-ceph v0.24.0 github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8 @@ -12,7 +12,7 @@ require ( github.com/gemalto/kmip-go v0.0.10 github.com/go-ini/ini v1.67.0 github.com/google/go-cmp v0.6.0 - github.com/google/uuid v1.3.1 + 
github.com/google/uuid v1.4.0 github.com/hashicorp/vault/api v1.10.0 github.com/jetstack/cert-manager v1.7.3 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0 @@ -39,7 +39,7 @@ require ( k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/mcs-api v0.1.0 - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/yaml v1.4.0 ) require ( diff --git a/go.sum b/go.sum index 2e3ed19f09d6..a62cf180bcc7 100644 --- a/go.sum +++ b/go.sum @@ -450,8 +450,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.46.1 h1:U26quvBWFZMQuultLw5tloW4GnmWaChEwMZNq8uYatw= -github.com/aws/aws-sdk-go v1.46.1/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.46.6 h1:6wFnNC9hETIZLMf6SOTN7IcclrOGwp/n9SLp8Pjt6E8= +github.com/aws/aws-sdk-go v1.46.6/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/banzaicloud/k8s-objectmatcher v1.8.0 h1:Nugn25elKtPMTA2br+JgHNeSQ04sc05MDPmpJnd1N2A= github.com/banzaicloud/k8s-objectmatcher v1.8.0/go.mod h1:p2LSNAjlECf07fbhDyebTkPUIYnU05G+WfGgkTmgeMg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -763,8 +763,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= @@ -1968,5 +1968,6 @@ sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6Lv sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/images/ceph/Makefile b/images/ceph/Makefile index e8e6033cf71a..992fb1e4e747 100755 --- a/images/ceph/Makefile +++ b/images/ceph/Makefile @@ -18,9 +18,9 @@ include ../image.mk # Image Build Options ifeq ($(GOARCH),amd64) -CEPH_VERSION ?= v17.2.6-20230410 
+CEPH_VERSION ?= v18.2.0-20231018 else -CEPH_VERSION ?= v17.2.6-20230410 +CEPH_VERSION ?= v18.2.0-20231018 endif REGISTRY_NAME = quay.io BASEIMAGE = $(REGISTRY_NAME)/ceph/ceph-$(GOARCH):$(CEPH_VERSION) diff --git a/pkg/operator/ceph/cluster/mon/health.go b/pkg/operator/ceph/cluster/mon/health.go index 4afdfc93a855..6727dd62ddb8 100644 --- a/pkg/operator/ceph/cluster/mon/health.go +++ b/pkg/operator/ceph/cluster/mon/health.go @@ -366,9 +366,32 @@ func (c *Cluster) checkHealth(ctx context.Context) error { } } + // failover mons running on host path to use persistent volumes if VolumeClaimTemplate is set and vice versa + for _, mon := range c.ClusterInfo.Monitors { + if c.HasMonPathChanged(mon.Name) { + logger.Infof("fail over mon %q due to change in mon path", mon.Name) + c.failMon(len(c.ClusterInfo.Monitors), desiredMonCount, mon.Name) + return nil + } + } + return nil } +// HasMonPathChanged checks if the mon storage path has changed from host path to persistent volume or vice versa +func (c *Cluster) HasMonPathChanged(mon string) bool { + var monPathChanged bool + if c.mapping.Schedule[mon] != nil && c.spec.Mon.VolumeClaimTemplate != nil { + logger.Infof("mon %q path has changed from host path to persistent volumes", mon) + monPathChanged = true + } else if c.mapping.Schedule[mon] == nil && c.spec.Mon.VolumeClaimTemplate == nil { + logger.Infof("mon %q path has changed from persistent volumes to host path", mon) + monPathChanged = true + } + + return monPathChanged +} + func (c *Cluster) trackMonInOrOutOfQuorum(monName string, inQuorum bool) (bool, error) { updateNeeded := false var monsOutOfQuorum []string diff --git a/pkg/operator/ceph/cluster/mon/health_test.go b/pkg/operator/ceph/cluster/mon/health_test.go index bc0b7297aab2..bef7fda8d1dd 100644 --- a/pkg/operator/ceph/cluster/mon/health_test.go +++ b/pkg/operator/ceph/cluster/mon/health_test.go @@ -645,3 +645,37 @@ func TestUpdateMonInterval(t *testing.T) { assert.Equal(t, time.Minute, h.interval) }) } + +func TestHasMonPathChanged(t *testing.T) { + t.Run("mon path changed from pv to hostpath", func(t *testing.T) { + c := New(context.TODO(), &clusterd.Context{}, "ns", cephv1.ClusterSpec{}, nil) + c.mapping.Schedule["a"] = nil + result := c.HasMonPathChanged("a") + assert.True(t, result) + }) + + t.Run("mon path has not changed from pv to hostpath", func(t *testing.T) { + c := New(context.TODO(), &clusterd.Context{}, "ns", cephv1.ClusterSpec{}, nil) + c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{Spec: v1.PersistentVolumeClaimSpec{}} + c.mapping.Schedule["b"] = nil + result := c.HasMonPathChanged("b") + c.spec.Mon.VolumeClaimTemplate = nil + assert.False(t, result) + }) + + t.Run("mon path changed from hostpath to pv", func(t *testing.T) { + c := New(context.TODO(), &clusterd.Context{}, "ns", cephv1.ClusterSpec{}, nil) + c.mapping.Schedule["c"] = &opcontroller.MonScheduleInfo{} + c.spec.Mon.VolumeClaimTemplate = &v1.PersistentVolumeClaim{Spec: v1.PersistentVolumeClaimSpec{}} + result := c.HasMonPathChanged("c") + assert.True(t, result) + }) + + t.Run("mon path has not changed from host path to pv", func(t *testing.T) { + c := New(context.TODO(), &clusterd.Context{}, "ns", cephv1.ClusterSpec{}, nil) + c.mapping.Schedule["d"] = &opcontroller.MonScheduleInfo{} + result := c.HasMonPathChanged("d") + c.spec.Mon.VolumeClaimTemplate = nil + assert.False(t, result) + }) +} diff --git a/pkg/operator/ceph/cluster/mon/mon.go b/pkg/operator/ceph/cluster/mon/mon.go index c08d04e14127..1e2201ec0402 100644 --- 
a/pkg/operator/ceph/cluster/mon/mon.go +++ b/pkg/operator/ceph/cluster/mon/mon.go @@ -1229,6 +1229,12 @@ func (c *Cluster) commitMaxMonIDRequireIncrementing(desiredMaxMonID int, require var updateDeploymentAndWait = UpdateCephDeploymentAndWait func (c *Cluster) updateMon(m *monConfig, d *apps.Deployment) error { + + if c.HasMonPathChanged(m.DaemonName) { + logger.Infof("path has changed for mon %q. Skip updating mon deployment %q in order to failover the mon", m.DaemonName, d.Name) + return nil + } + // Expand mon PVC if storage request for mon has increased in cephcluster crd if c.monVolumeClaimTemplate(m) != nil { desiredPvc, err := c.makeDeploymentPVC(m, false) diff --git a/pkg/operator/ceph/object/admin_test.go b/pkg/operator/ceph/object/admin_test.go index 5e9ab5c367bc..a93ed0ef4960 100644 --- a/pkg/operator/ceph/object/admin_test.go +++ b/pkg/operator/ceph/object/admin_test.go @@ -406,7 +406,7 @@ const firstPeriodUpdate = `{ "id": "1580fd1d-a065-4484-82ff-329e9a779999", "name": "my-store", "api_name": "my-store", - "is_master": "true", + "is_master": true, "endpoints": [ "http://10.105.59.166:80" ], @@ -489,7 +489,7 @@ const secondPeriodGet = `{ "id": "1580fd1d-a065-4484-82ff-329e9a779999", "name": "my-store", "api_name": "my-store", - "is_master": "true", + "is_master": true, "endpoints": [ "http://10.105.59.166:80" ], @@ -575,7 +575,7 @@ const secondPeriodUpdateWithoutChanges = `{ "id": "1580fd1d-a065-4484-82ff-329e9a779999", "name": "my-store", "api_name": "my-store", - "is_master": "true", + "is_master": true, "endpoints": [ "http://10.105.59.166:80" ], @@ -659,7 +659,7 @@ const secondPeriodUpdateWithChanges = `{ "id": "1580fd1d-a065-4484-82ff-329e9a779999", "name": "my-store", "api_name": "my-store", - "is_master": "true", + "is_master": true, "endpoints": [ "http://10.105.59.166:80", "https://10.105.59.166:443" diff --git a/pkg/operator/ceph/object/controller_test.go b/pkg/operator/ceph/object/controller_test.go index 76f3fcfe930d..d58e1a3df149 100644 --- a/pkg/operator/ceph/object/controller_test.go +++ b/pkg/operator/ceph/object/controller_test.go @@ -71,7 +71,7 @@ const ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "my-store", "api_name": "my-store", - "is_master": "true", + "is_master": true, "endpoints": [ "http://rook-ceph-rgw-my-store.rook-ceph.svc:80" ], @@ -206,7 +206,7 @@ const ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "zonegroup-a", "api_name": "zonegroup-a", - "is_master": "true", + "is_master": true, "endpoints": [], "hostnames": [], "hostnames_s3website": [], @@ -242,7 +242,7 @@ const ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "zonegroup-a", "api_name": "zonegroup-a", - "is_master": "true", + "is_master": true, "endpoints": [ "http://rook-ceph-rgw-my-store.rook-ceph.svc:80" ], diff --git a/pkg/operator/ceph/object/dependents_test.go b/pkg/operator/ceph/object/dependents_test.go index 23e7f22eef90..768b2b94146f 100644 --- a/pkg/operator/ceph/object/dependents_test.go +++ b/pkg/operator/ceph/object/dependents_test.go @@ -46,7 +46,7 @@ const ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "zonegroup-a", "api_name": "zonegroup-a", - "is_master": "true", + "is_master": true, "endpoints": [ "http://rook-ceph-rgw-store-a.rook-ceph.svc:80" ], @@ -86,7 +86,7 @@ const ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "zonegroup-a", "api_name": "zonegroup-a", - "is_master": "true", + "is_master": true, "endpoints": [ "http://rook-ceph-rgw-store-a.rook-ceph.svc:80" ], diff --git a/pkg/operator/ceph/object/objectstore.go 
b/pkg/operator/ceph/object/objectstore.go index e2e8d22c17ed..5ca80e7783af 100644 --- a/pkg/operator/ceph/object/objectstore.go +++ b/pkg/operator/ceph/object/objectstore.go @@ -81,7 +81,7 @@ type idType struct { type zoneGroupType struct { MasterZoneID string `json:"master_zone"` - IsMaster string `json:"is_master"` + IsMaster bool `json:"is_master"` Zones []zoneType `json:"zones"` Endpoints []string `json:"endpoints"` } @@ -302,12 +302,7 @@ func checkZoneGroupIsMaster(objContext *Context) (bool, error) { return false, errors.Wrap(err, "failed to parse master zone id") } - zoneGroupIsMaster, err := strconv.ParseBool(zoneGroupJson.IsMaster) - if err != nil { - return false, errors.Wrap(err, "failed to parse is_master from zone group json into bool") - } - - return zoneGroupIsMaster, nil + return zoneGroupJson.IsMaster, nil } func DecodeSecret(secret *v1.Secret, keyName string) (string, error) { diff --git a/pkg/operator/ceph/object/zone/controller_test.go b/pkg/operator/ceph/object/zone/controller_test.go index d0a8814f976f..e90dc44148d0 100644 --- a/pkg/operator/ceph/object/zone/controller_test.go +++ b/pkg/operator/ceph/object/zone/controller_test.go @@ -47,7 +47,7 @@ const ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "zonegroup-a", "api_name": "zonegroup-a", - "is_master": "true", + "is_master": true, "endpoints": [ ":80" ], diff --git a/pkg/operator/ceph/object/zonegroup/controller_test.go b/pkg/operator/ceph/object/zonegroup/controller_test.go index 1bc0f53c1963..2eed22f1e83d 100644 --- a/pkg/operator/ceph/object/zonegroup/controller_test.go +++ b/pkg/operator/ceph/object/zonegroup/controller_test.go @@ -87,7 +87,7 @@ var ( "id": "fd8ff110-d3fd-49b4-b24f-f6cd3dddfedf", "name": "zonegroup-a", "api_name": "zonegroup-a", - "is_master": "true", + "is_master": true, "endpoints": [ ":80" ], diff --git a/tests/scripts/create-dev-cluster.sh b/tests/scripts/create-dev-cluster.sh new file mode 100755 index 000000000000..da17b3ab9416 --- /dev/null +++ b/tests/scripts/create-dev-cluster.sh @@ -0,0 +1,170 @@ +#!/usr/bin/env bash + +# Copyright 2021 The Rook Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KUBECTL="minikube kubectl --" +ROOK_EXAMPLES_DIR="../../deploy/examples/" + +wait_for_ceph_cluster() { + echo "Waiting for ceph cluster" + WAIT_CEPH_CLUSTER_RUNNING=20 + while ! 
$KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.ceph.health}' | grep -q "HEALTH_OK"; do + echo "Waiting for Ceph cluster installed" + sleep ${WAIT_CEPH_CLUSTER_RUNNING} + done + echo "Ceph cluster installed and running" +} + +get_minikube_driver() { + os=$(uname) + architecture=$(uname -m) + if [[ "$os" == "Darwin" ]]; then + if [[ "$architecture" == "x86_64" ]]; then + echo "hyperkit" + elif [[ "$architecture" == "arm64" ]]; then + echo "qemu" + else + echo "Unknown Architecture on Apple OS" + exit 1 + fi + elif [[ "$os" == "Linux" ]]; then + echo "kvm2" + else + echo "Unknown/Unsupported OS" + exit 1 + fi +} + +show_ceph_dashboard_info() { + DASHBOARD_PASSWORD=$($KUBECTL -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo) + IP_ADDR=$($KUBECTL get po --selector="app=rook-ceph-mgr" -n rook-ceph --output jsonpath='{.items[*].status.hostIP}') + PORT="$($KUBECTL -n rook-ceph -o=jsonpath='{.spec.ports[?(@.name == "dashboard")].nodePort}' get services rook-ceph-mgr-dashboard-external-http)" + BASE_URL="http://$IP_ADDR:$PORT" + echo "===========================" + echo "Ceph Dashboard: " + echo " IP_ADDRESS: $BASE_URL" + echo " PASSWORD: $DASHBOARD_PASSWORD" + echo "===========================" +} + +check_minikube_exists() { + minikube profile list > /dev/null 2>&1 + local retcode=$? + + if [ $retcode -eq 0 ]; then + echo "A minikube environment already exists, please use -f to force the cluster creation." + exit 1 + fi +} + +setup_minikube_env() { + minikube_driver="$(get_minikube_driver)" + echo "Setting up minikube env (using $minikube_driver driver)" + minikube delete + minikube start --disk-size=40g --extra-disks=3 --driver "$minikube_driver" + eval "$(minikube docker-env -p minikube)" +} + +create_rook_cluster() { + echo "Creating cluster" + $KUBECTL apply -f crds.yaml -f common.yaml -f operator.yaml + $KUBECTL apply -f cluster-test.yaml -f toolbox.yaml + $KUBECTL apply -f dashboard-external-http.yaml +} + +check_examples_dir() { + CRDS_FILE="crds.yaml" + if [ ! -e ${CRDS_FILE} ]; then + echo "File ${ROOK_EXAMPLES_DIR}/${CRDS_FILE} does not exist. Please, provide a valid rook examples directory." + exit 1 + fi +} + +wait_for_rook_operator() { + echo "Waiting for rook operator" + $KUBECTL rollout status deployment rook-ceph-operator -n rook-ceph --timeout=180s + while ! $KUBECTL get cephclusters.ceph.rook.io -n rook-ceph -o jsonpath='{.items[?(@.kind == "CephCluster")].status.phase}' | grep -q "Ready"; do + echo "Waiting for cluster to be ready..." 
+        sleep 20
+    done
+}
+
+enable_rook_orchestrator() {
+    echo "Enabling rook orchestrator"
+    $KUBECTL rollout status deployment rook-ceph-tools -n rook-ceph --timeout=30s
+    $KUBECTL -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph mgr module enable rook
+    $KUBECTL -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph orch set backend rook
+    $KUBECTL -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph orch status
+}
+
+show_usage() {
+    echo ""
+    echo " Usage: $(basename "$0") [-r] [-f] [-d /path/to/rook-examples/dir]"
+    echo "  -r        Enable rook orchestrator"
+    echo "  -f        Force cluster creation even if a minikube environment already exists"
+    echo "  -d value  Path to Rook examples directory (e.g. github.com/rook/rook/deploy/examples)"
+}
+
+####################################################################
+################# MAIN #############################################
+
+while getopts ":hrfd:" opt; do
+    case $opt in
+        h)
+            show_usage
+            exit 0
+            ;;
+        r)
+            enable_rook=true
+            ;;
+        f)
+            force_minikube=true
+            ;;
+        d)
+            ROOK_EXAMPLES_DIR="$OPTARG"
+            ;;
+        \?)
+            echo "Invalid option: -$OPTARG" >&2
+            show_usage
+            exit 1
+            ;;
+        :)
+            echo "Option -$OPTARG requires an argument." >&2
+            exit 1
+            ;;
+    esac
+done
+
+echo "Using '$ROOK_EXAMPLES_DIR' as examples directory."
+
+cd "$ROOK_EXAMPLES_DIR" || exit
+check_examples_dir
+
+if [ -z "$force_minikube" ]; then
+    check_minikube_exists
+fi
+
+setup_minikube_env
+create_rook_cluster
+wait_for_rook_operator
+wait_for_ceph_cluster
+
+if [ "$enable_rook" = true ]; then
+    enable_rook_orchestrator
+fi
+
+show_ceph_dashboard_info
+
+####################################################################
+####################################################################
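
A quick sketch of how the new dev-cluster script can be invoked, assuming it is run from `tests/scripts/` (its default `ROOK_EXAMPLES_DIR` is `../../deploy/examples/`); the explicit path in the second command is only illustrative, and `-r`, `-f`, and `-d` are the flags defined by the script itself:

```console
# Run from tests/scripts/ with the default examples directory
./create-dev-cluster.sh

# Force recreation of an existing minikube profile, enable the rook orchestrator,
# and point at an explicit examples directory (path shown is only an example)
./create-dev-cluster.sh -f -r -d "$HOME/rook/deploy/examples"
```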
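One possible way to exercise the new mon failover-on-path-change behavior is to add a `volumeClaimTemplate` to the mon spec of a running cluster and watch the operator replace the mons on its next health check. This is an untested sketch: the CephCluster name `my-cluster` and the `local-storage` storage class match the minikube example above and are assumptions for any other cluster.

```console
# Hypothetical: switch mons from host path to PVCs; on the next health check the
# operator should fail over each mon to a new one backed by the volumeClaimTemplate.
kubectl -n rook-ceph patch cephcluster my-cluster --type merge \
  -p '{"spec":{"mon":{"volumeClaimTemplate":{"spec":{"storageClassName":"local-storage","resources":{"requests":{"storage":"10Gi"}}}}}}}'
kubectl -n rook-ceph get pods -l app=rook-ceph-mon -w
```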