diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 5df527162fdb..bb096e215db8 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -4,7 +4,7 @@ on:
   workflow_call:
     inputs:
       ceph_images:
-        description: 'JSON list of Ceph images for creating Ceph cluster'
+        description: "JSON list of Ceph images for creating Ceph cluster"
         default: '["quay.io/ceph/ceph:v18"]'
         type: string
@@ -229,6 +229,39 @@ jobs:
            echo "script failed because wrong realm was passed"
          fi
 
+      - name: test topology flags
+        run: |
+          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
+          # create 3 replica-1 pools
+          sed -i 's/replicapool/replica1a/' deploy/examples/pool-test.yaml
+          kubectl create -f deploy/examples/pool-test.yaml
+          sed -i 's/replica1a/replica1b/' deploy/examples/pool-test.yaml
+          kubectl create -f deploy/examples/pool-test.yaml
+          sed -i 's/replica1b/replica1c/' deploy/examples/pool-test.yaml
+          kubectl create -f deploy/examples/pool-test.yaml
+          # bring back the original file
+          sed -i 's/replica1c/replicapool/' deploy/examples/pool-test.yaml
+
+          # check and wait for the pools to be ready
+          kubectl wait --for='jsonpath={.status.phase}=Ready' Cephblockpool/replica1a -nrook-ceph
+          kubectl wait --for='jsonpath={.status.phase}=Ready' Cephblockpool/replica1b -nrook-ceph
+          kubectl wait --for='jsonpath={.status.phase}=Ready' Cephblockpool/replica1c -nrook-ceph
+
+          # pass correct flags
+          kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool --topology-pools replica1a,replica1b,replica1c --topology-failure-domain-label hostname --topology-failure-domain-values minikube,minikube-m02,minikube-m03
+          # pass pools that do not exist
+          if output=$(kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool --topology-pools ab,cd,ef --topology-failure-domain-label hostname --topology-failure-domain-values minikube,minikube-m02,minikube-m03); then
+            echo "script run completed with stderr error after passing the wrong pools: $output"
+          else
+            echo "script failed because the passed topology pools do not exist"
+          fi
+          # don't pass all topology flags
+          if output=$(kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool --topology-pools replica1a,replica1b,replica1c --topology-failure-domain-values minikube,minikube-m02,minikube-m03); then
+            echo "script run completed with stderr error after passing the wrong flags: $output"
+          else
+            echo "script failed because topology-failure-domain-label is missing"
+          fi
+
       - name: test enable v2 mon port
         run: |
           toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
diff --git a/Documentation/CRDs/Cluster/.pages b/Documentation/CRDs/Cluster/.pages
index 001ac3924b7b..524ac4b14ea3 100644
--- a/Documentation/CRDs/Cluster/.pages
+++ b/Documentation/CRDs/Cluster/.pages
@@ -4,5 +4,5 @@ nav:
   - host-cluster.md
   - pvc-cluster.md
   - stretch-cluster.md
-  - external-cluster.md
+  - external-cluster
   - ...
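The topology test above stamps out three replica-1 pools by renaming `replicapool` in `deploy/examples/pool-test.yaml`. For reviewers who don't have that manifest in front of them, below is a minimal sketch of what such a replica-1 `CephBlockPool` typically looks like; the real `pool-test.yaml` contents are not part of this diff, so the name, failure domain, and extra parameters here are assumptions.

```yaml
# Hypothetical sketch of a replica-1 CephBlockPool along the lines of deploy/examples/pool-test.yaml.
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    size: 1
    # replica-1 pools must opt out of the safe-replica check
    requireSafeReplicaSize: false
```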
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index 7e82947873fb..d32edd605d7a 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -8,7 +8,7 @@ There are primarily four different modes in which to create your cluster.
 1. [Host Storage Cluster](host-cluster.md): Consume storage from host paths and raw devices
 2. [PVC Storage Cluster](pvc-cluster.md): Dynamically provision storage underneath Rook by specifying the storage class Rook should use to consume storage (via PVCs)
 3. [Stretched Storage Cluster](stretch-cluster.md): Distribute Ceph mons across three zones, while storage (OSDs) is only configured in two zones
-4. [External Ceph Cluster](external-cluster.md): Connect your K8s applications to an external Ceph cluster
+4. [External Ceph Cluster](external-cluster/external-cluster.md): Connect your K8s applications to an external Ceph cluster
 
 See the separate topics for a description and examples of each of these scenarios.
 
@@ -24,7 +24,7 @@ Settings can be specified at the global level to apply to the cluster as a whole
 ### Cluster Settings
 
 * `external`:
-  * `enable`: if `true`, the cluster will not be managed by Rook but via an external entity. This mode is intended to connect to an existing cluster. In this case, Rook will only consume the external cluster. However, Rook will be able to deploy various daemons in Kubernetes such as object gateways, mds and nfs if an image is provided and will refuse otherwise. If this setting is enabled **all** the other options will be ignored except `cephVersion.image` and `dataDirHostPath`. See [external cluster configuration](external-cluster.md). If `cephVersion.image` is left blank, Rook will refuse the creation of extra CRs like object, file and nfs.
+  * `enable`: if `true`, the cluster will not be managed by Rook but via an external entity. This mode is intended to connect to an existing cluster. In this case, Rook will only consume the external cluster. However, Rook will be able to deploy various daemons in Kubernetes such as object gateways, mds and nfs if an image is provided and will refuse otherwise. If this setting is enabled **all** the other options will be ignored except `cephVersion.image` and `dataDirHostPath`. See [external cluster configuration](external-cluster/external-cluster.md). If `cephVersion.image` is left blank, Rook will refuse the creation of extra CRs like object, file and nfs.
 * `cephVersion`: The version information for launching the ceph daemons.
   * `image`: The image used for running the ceph daemons. For example, `quay.io/ceph/ceph:v18.2.2`. For more details read the [container images section](#ceph-container-images). For the latest ceph images, see the [Ceph DockerHub](https://hub.docker.com/r/ceph/ceph/tags/).
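To make the `external.enable` setting described above concrete, here is a minimal sketch of an external-mode `CephCluster`. This is an illustration only, assuming the defaults discussed in the doc; the authoritative manifest in the repo is `deploy/examples/cluster-external.yaml`, and its namespaces and fields may differ.

```yaml
# Minimal sketch of an external-mode CephCluster (illustrative, not the shipped example).
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph-external
  namespace: rook-ceph-external
spec:
  external:
    enable: true            # consume an existing Ceph cluster instead of managing one
  crashCollector:
    disable: true
  # Optionally set cephVersion.image so Rook can deploy extra daemons
  # (object gateways, mds, nfs) against the external cluster.
  dataDirHostPath: /var/lib/rook
```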
diff --git a/Documentation/CRDs/Cluster/external-cluster/.pages b/Documentation/CRDs/Cluster/external-cluster/.pages
new file mode 100644
index 000000000000..5a3a6ca9e41c
--- /dev/null
+++ b/Documentation/CRDs/Cluster/external-cluster/.pages
@@ -0,0 +1,3 @@
+nav:
+  - external-cluster.md
+  - topology-for-external-mode.md
diff --git a/Documentation/CRDs/Cluster/external-cluster.md b/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
similarity index 92%
rename from Documentation/CRDs/Cluster/external-cluster.md
rename to Documentation/CRDs/Cluster/external-cluster/external-cluster.md
index 7f1155946100..bc0db2bb05e1 100644
--- a/Documentation/CRDs/Cluster/external-cluster.md
+++ b/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
@@ -60,6 +60,9 @@ python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --
 * `--upgrade`: (optional) Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied.
 * `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--k8s-cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
 * `--v2-port-enable`: (optional) Enables the v2 mon port (3300) for mons.
+* `--topology-pools`: (optional) Comma-separated list of topology-constrained rbd pools
+* `--topology-failure-domain-label`: (optional) K8s cluster failure domain label (example: zone, rack, or host) that matches the Ceph failure domain of the `topology-pools`
+* `--topology-failure-domain-values`: (optional) Comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list
 
 ### Multi-tenancy
 
@@ -84,6 +87,15 @@ See the [Multisite doc](https://docs.ceph.com/en/quincy/radosgw/multisite/#confi
 
 python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --format bash --rgw-endpoint <rgw_endpoint> --rgw-realm-name <rgw_realm_name> --rgw-zonegroup-name <rgw_zonegroup_name> --rgw-zone-name <rgw_zone_name>
 ```
 
+### Topology Based Provisioning
+
+Enable Topology Based Provisioning for RBD pools by passing the `--topology-pools`, `--topology-failure-domain-label` and `--topology-failure-domain-values` flags.
+A new storageclass named `ceph-rbd-topology` will be created by the import script with `volumeBindingMode: WaitForFirstConsumer`.
+The storageclass is used to create a volume in the pool matching the topology where a pod is scheduled.
+
+For more details, see the [Topology-Based Provisioning](topology-for-external-mode.md) topic.
+
+
 ### Upgrade Example
 
 1) If consumer cluster doesn't have restricted caps, this will upgrade all the default csi-users (non-restricted):
@@ -234,7 +246,7 @@ Consume the S3 Storage, in two different ways:
 ```
 
 !!! hint
-    For more details see the [Object Store topic](../../Storage-Configuration/Object-Storage-RGW/object-storage.md#connect-to-an-external-object-store)
+    For more details see the [Object Store topic](../../../Storage-Configuration/Object-Storage-RGW/object-storage.md#connect-to-an-external-object-store)
 
 ### Connect to v2 mon port
 
diff --git a/Documentation/CRDs/Cluster/external-cluster/topology-for-external-mode.md b/Documentation/CRDs/Cluster/external-cluster/topology-for-external-mode.md
new file mode 100644
index 000000000000..67fda8817a1a
--- /dev/null
+++ b/Documentation/CRDs/Cluster/external-cluster/topology-for-external-mode.md
@@ -0,0 +1,118 @@
+# Topology-Based Provisioning
+
+## Scenario
+Applications like Kafka will have a deployment with multiple running instances. Each service instance will create a new claim and is expected to be located in a different zone. Since the application has its own redundant instances, there is no requirement for redundancy at the data layer. A storage class is created that will provision storage from replica 1 Ceph pools that are located in each of the separate zones.
+
+## Configuration Flags
+
+Add the required flags to the `create-external-cluster-resources.py` script:
+
+- `--topology-pools`: (optional) Comma-separated list of topology-constrained rbd pools
+
+- `--topology-failure-domain-label`: (optional) K8s cluster failure domain label (example: zone, rack, or host) that matches the Ceph failure domain of the `topology-pools`
+
+- `--topology-failure-domain-values`: (optional) Comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list
+
+The import script will then create a new storage class named `ceph-rbd-topology`.
+
+## Example Configuration
+
+### Ceph cluster
+
+Determine the names of the zones (or other failure domains) in the Ceph CRUSH map where each of the pools will have corresponding CRUSH rules.
+
+Create a zone-specific CRUSH rule for each of the pools. For example, this is a CRUSH rule for `zone-a`:
+
+```
+$ ceph osd crush rule create-replicated <rule-name> <root> <failure-domain> <class>
+ {
+    "rule_id": 5,
+    "rule_name": "rule_host-zone-a-hdd",
+    "type": 1,
+    "steps": [
+        {
+            "op": "take",
+            "item": -10,
+            "item_name": "zone-a~hdd"
+        },
+        {
+            "op": "choose_firstn",
+            "num": 0,
+            "type": "osd"
+        },
+        {
+            "op": "emit"
+        }
+    ]
+}
+```
+
+Create replica-1 pools based on each of the CRUSH rules from the previous step. Each pool must be created with a CRUSH rule to limit the pool to OSDs in a specific zone.
+
+!!! note
+    Disable the ceph warning for replica-1 pools: `ceph config set global mon_allow_pool_size_one true`
+
+Determine the zones in the K8s cluster that correspond to each of the pools in the Ceph cluster. The K8s nodes require labels as defined with the [OSD Topology labels](../ceph-cluster-crd.md#osd-topology). Some environments already have nodes labeled in zones. Set the topology labels on the nodes if not already present.
+
+Set the flags of the external cluster configuration script based on the pools and failure domains.
+
+--topology-pools=pool-a,pool-b,pool-c
+
+--topology-failure-domain-label=zone
+
+--topology-failure-domain-values=zone-a,zone-b,zone-c
+
+Then run the python script to generate the settings, which will be imported into the Rook cluster:
+```
+ python3 create-external-cluster-resources.py --rbd-data-pool-name replicapool --topology-pools pool-a,pool-b,pool-c --topology-failure-domain-label zone --topology-failure-domain-values zone-a,zone-b,zone-c
+```
+
+Output:
+```
+export ROOK_EXTERNAL_FSID=8f01d842-d4b2-11ee-b43c-0050568fb522
+....
+....
+....
+export TOPOLOGY_POOLS=pool-a,pool-b,pool-c
+export TOPOLOGY_FAILURE_DOMAIN_LABEL=zone
+export TOPOLOGY_FAILURE_DOMAIN_VALUES=zone-a,zone-b,zone-c
+```
+
+### Kubernetes Cluster
+
+Check that the external cluster is created and connected as per the installation steps.
+Review the new storage class:
+```
+$ kubectl get sc ceph-rbd-topology -o yaml
+allowVolumeExpansion: true
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  creationTimestamp: "2024-03-07T12:10:19Z"
+  name: ceph-rbd-topology
+  resourceVersion: "82502"
+  uid: 68448a14-3a78-42c5-ac29-261b6c3404af
+parameters:
+  ...
+  ...
+  pool: replicapool
+  topologyConstrainedPools: |
+    [
+     {"poolName":"pool-a",
+      "domainSegments":[
+      {"domainLabel":"zone","value":"zone-a"}]},
+     {"poolName":"pool-b",
+      "domainSegments":[
+      {"domainLabel":"zone","value":"zone-b"}]},
+     {"poolName":"pool-c",
+      "domainSegments":[
+      {"domainLabel":"zone","value":"zone-c"}]},
+    ]
+provisioner: rook-ceph.rbd.csi.ceph.com
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+```
+
+#### Create a Topology-Based PVC
+
+The topology-based storage class is ready to be consumed! Create a PVC from the `ceph-rbd-topology` storage class above, and watch the OSD usage to see how the data is spread only among the topology-based CRUSH buckets.
diff --git a/Documentation/Getting-Started/glossary.md b/Documentation/Getting-Started/glossary.md
index 5dfd032313d4..492491d3ced7 100644
--- a/Documentation/Getting-Started/glossary.md
+++ b/Documentation/Getting-Started/glossary.md
@@ -64,7 +64,7 @@ CephRBDMirror CRD is used by Rook to allow creation and updating rbd-mirror daem
 
 ### External Storage Cluster
 
-An [external cluster](../CRDs/Cluster/external-cluster.md) is a Ceph configuration that is managed outside of the local K8s cluster.
+An [external cluster](../CRDs/Cluster/external-cluster/external-cluster.md) is a Ceph configuration that is managed outside of the local K8s cluster.
 
 ### Host Storage Cluster
 
diff --git a/ROADMAP.md b/ROADMAP.md
index 377ddcf51a88..749bd0fa8867 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -24,6 +24,7 @@ The following high level features are targeted for Rook v1.14 (April 2024). For
 * Separate CSI image repository and tag for all images in the helm chart [#13585](https://github.com/rook/rook/issues/13585)
 * Ceph-CSI [v3.11](https://github.com/ceph/ceph-csi/issues?q=is%3Aopen+is%3Aissue+milestone%3Arelease-v3.11.0)
 * Add build support for Go 1.22 [#13738](https://github.com/rook/rook/pull/13738)
+* Add topology based provisioning for external clusters [#13821](https://github.com/rook/rook/pull/13821)
 
 ## Kubectl Plugin
 
diff --git a/deploy/charts/rook-ceph-cluster/templates/securityContextConstraints.yaml b/deploy/charts/rook-ceph-cluster/templates/securityContextConstraints.yaml
index f79bcef07f79..82a0bc363b6c 100644
--- a/deploy/charts/rook-ceph-cluster/templates/securityContextConstraints.yaml
+++ b/deploy/charts/rook-ceph-cluster/templates/securityContextConstraints.yaml
@@ -37,9 +37,8 @@ volumes:
   - secret
 users:
   # A user needs to be added for each rook service account.
-  - system:serviceaccount:{{ .Release.Namespace }}:default
+  - system:serviceaccount:{{ .Release.Namespace }}:rook-ceph-default
   - system:serviceaccount:{{ .Release.Namespace }}:rook-ceph-mgr
   - system:serviceaccount:{{ .Release.Namespace }}:rook-ceph-osd
   - system:serviceaccount:{{ .Release.Namespace }}:rook-ceph-rgw
-  - system:serviceaccount:{{ .Release.Namespace }}:rook-ceph-default
 {{- end }}
diff --git a/deploy/examples/common-external.yaml b/deploy/examples/common-external.yaml
index 51f1c5fbeb6c..03e7192d9257 100644
--- a/deploy/examples/common-external.yaml
+++ b/deploy/examples/common-external.yaml
@@ -57,6 +57,12 @@ metadata:
   name: rook-ceph-cmd-reporter
   namespace: rook-ceph-external # namespace:cluster
 ---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-default
+  namespace: rook-ceph-external # namespace:cluster
+---
 kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
diff --git a/deploy/examples/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py
index 61039c9eb1bd..b4404f745370 100644
--- a/deploy/examples/create-external-cluster-resources.py
+++ b/deploy/examples/create-external-cluster-resources.py
@@ -474,6 +474,24 @@ def gen_arg_parser(cls, args_to_parse=None):
             required=False,
             help="provides the name of the rgw-zonegroup",
         )
+        output_group.add_argument(
+            "--topology-pools",
+            default="",
+            required=False,
+            help="comma-separated list of topology-constrained rbd pools",
+        )
+        output_group.add_argument(
+            "--topology-failure-domain-label",
+            default="",
+            required=False,
+            help="k8s cluster failure domain label (example: zone, rack, or host) that matches the ceph failure domain of the topology-pools",
+        )
+        output_group.add_argument(
+            "--topology-failure-domain-values",
+            default="",
+            required=False,
+            help="comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list",
+        )
 
         upgrade_group = argP.add_argument_group("upgrade")
         upgrade_group.add_argument(
@@ -1321,16 +1339,15 @@ def create_rgw_admin_ops_user(self):
             "",
         )
 
-    def validate_rbd_pool(self):
-        if not self.cluster.pool_exists(self._arg_parser.rbd_data_pool_name):
+    def validate_rbd_pool(self, pool_name):
+        if not self.cluster.pool_exists(pool_name):
             raise ExecutionFailureException(
-                f"The provided pool, '{self._arg_parser.rbd_data_pool_name}', does not exist"
+                f"The provided pool, '{pool_name}', does not exist"
             )
 
-    def init_rbd_pool(self):
+    def init_rbd_pool(self, rbd_pool_name):
         if isinstance(self.cluster, DummyRados):
             return
-        rbd_pool_name = self._arg_parser.rbd_data_pool_name
         ioctx = self.cluster.open_ioctx(rbd_pool_name)
         rbd_inst = rbd.RBD()
         rbd_inst.pool_init(ioctx, True)
 
@@ -1501,6 +1518,54 @@ def validate_rgw_multisite(self, rgw_multisite_config_name, rgw_multisite_config
             return "-1"
         return ""
 
+    def convert_comma_separated_to_array(self, value):
+        return value.split(",")
+
+    def raise_exception_if_any_topology_flag_is_missing(self):
+        if (
+            (
+                self._arg_parser.topology_pools != ""
+                and (
+                    self._arg_parser.topology_failure_domain_label == ""
+                    or self._arg_parser.topology_failure_domain_values == ""
+                )
+            )
+            or (
+                self._arg_parser.topology_failure_domain_label != ""
+                and (
+                    self._arg_parser.topology_pools == ""
+                    or self._arg_parser.topology_failure_domain_values == ""
+                )
+            )
+            or (
+                self._arg_parser.topology_failure_domain_values != ""
+                and (
+                    self._arg_parser.topology_pools == ""
+                    or self._arg_parser.topology_failure_domain_label == ""
+                )
+            )
+        ):
+            raise ExecutionFailureException(
+                "provide all the topology flags --topology-pools, --topology-failure-domain-label, --topology-failure-domain-values"
+            )
+
+    def validate_topology_values(self, topology_pools, topology_fd):
+        if len(topology_pools) != len(topology_fd):
+            raise ExecutionFailureException(
+                f"The provided topology pools, '{topology_pools}', and "
+                f"topology failure domain, '{topology_fd}', "
+                f"are of different length, '{len(topology_pools)}' and '{len(topology_fd)}' respectively"
+            )
+        return
+
+    def validate_topology_rbd_pools(self, topology_rbd_pools):
+        for pool in topology_rbd_pools:
+            self.validate_rbd_pool(pool)
+
+    def init_topology_rbd_pools(self, topology_rbd_pools):
+        for pool in topology_rbd_pools:
+            self.init_rbd_pool(pool)
+
     def _gen_output_map(self):
         if self.out_map:
             return
@@ -1510,8 +1575,8 @@ def _gen_output_map(self):
         self._arg_parser.k8s_cluster_name = (
             self._arg_parser.k8s_cluster_name.lower()
         )  # always convert cluster name to lowercase characters
-        self.validate_rbd_pool()
-        self.init_rbd_pool()
+        self.validate_rbd_pool(self._arg_parser.rbd_data_pool_name)
+        self.init_rbd_pool(self._arg_parser.rbd_data_pool_name)
         self.validate_rados_namespace()
         self._excluded_keys.add("K8S_CLUSTER_NAME")
         self.get_cephfs_data_pool_details()
@@ -1585,6 +1650,33 @@ def _gen_output_map(self):
         self.out_map["RBD_METADATA_EC_POOL_NAME"] = (
             self.validate_rbd_metadata_ec_pool_name()
         )
+        self.out_map["TOPOLOGY_POOLS"] = self._arg_parser.topology_pools
+        self.out_map["TOPOLOGY_FAILURE_DOMAIN_LABEL"] = (
+            self._arg_parser.topology_failure_domain_label
+        )
+        self.out_map["TOPOLOGY_FAILURE_DOMAIN_VALUES"] = (
+            self._arg_parser.topology_failure_domain_values
+        )
+        if (
+            self._arg_parser.topology_pools != ""
+            and self._arg_parser.topology_failure_domain_label != ""
+            and self._arg_parser.topology_failure_domain_values != ""
+        ):
+            self.validate_topology_values(
+                self.convert_comma_separated_to_array(self.out_map["TOPOLOGY_POOLS"]),
+                self.convert_comma_separated_to_array(
+                    self.out_map["TOPOLOGY_FAILURE_DOMAIN_VALUES"]
+                ),
+            )
+            self.validate_topology_rbd_pools(
+                self.convert_comma_separated_to_array(self.out_map["TOPOLOGY_POOLS"])
+            )
+            self.init_topology_rbd_pools(
+                self.convert_comma_separated_to_array(self.out_map["TOPOLOGY_POOLS"])
+            )
+        else:
+            self.raise_exception_if_any_topology_flag_is_missing()
+
         self.out_map["RGW_POOL_PREFIX"] = self._arg_parser.rgw_pool_prefix
         self.out_map["RGW_ENDPOINT"] = ""
         if self._arg_parser.rgw_endpoint:
@@ -1821,6 +1913,33 @@ def gen_json_out(self):
                 }
             )
 
+        # if 'TOPOLOGY_POOLS', 'TOPOLOGY_FAILURE_DOMAIN_LABEL', 'TOPOLOGY_FAILURE_DOMAIN_VALUES' exist,
+        # then only add 'topology' StorageClass
+        if (
+            self.out_map["TOPOLOGY_POOLS"]
+            and self.out_map["TOPOLOGY_FAILURE_DOMAIN_LABEL"]
+            and self.out_map["TOPOLOGY_FAILURE_DOMAIN_VALUES"]
+        ):
+            json_out.append(
+                {
+                    "name": "ceph-rbd-topology",
+                    "kind": "StorageClass",
+                    "data": {
+                        "topologyFailureDomainLabel": self.out_map[
+                            "TOPOLOGY_FAILURE_DOMAIN_LABEL"
+                        ],
+                        "topologyFailureDomainValues": self.out_map[
+                            "TOPOLOGY_FAILURE_DOMAIN_VALUES"
+                        ],
+                        "topologyPools": self.out_map["TOPOLOGY_POOLS"],
+                        "pool": self.out_map["RBD_POOL_NAME"],
+                        "csi.storage.k8s.io/provisioner-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/controller-expand-secret-name": f"rook-{self.out_map['CSI_RBD_PROVISIONER_SECRET_NAME']}",
+                        "csi.storage.k8s.io/node-stage-secret-name": f"rook-{self.out_map['CSI_RBD_NODE_SECRET_NAME']}",
+                    },
+                }
+            )
+
         # if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
         if self.out_map["CEPHFS_FS_NAME"]:
             json_out.append(
diff --git a/deploy/examples/direct-mount.yaml b/deploy/examples/direct-mount.yaml
index db4487eb51ac..2788c7fc6d81 100644
--- a/deploy/examples/direct-mount.yaml
+++ b/deploy/examples/direct-mount.yaml
@@ -16,6 +16,7 @@ spec:
         app: rook-direct-mount
     spec:
       dnsPolicy: ClusterFirstWithHostNet
+      serviceAccountName: rook-ceph-default
       containers:
         - name: rook-direct-mount
          image: rook/ceph:master
diff --git a/deploy/examples/import-external-cluster.sh b/deploy/examples/import-external-cluster.sh
index 77381e715a29..2da209316974 100644
--- a/deploy/examples/import-external-cluster.sh
+++ b/deploy/examples/import-external-cluster.sh
@@ -19,9 +19,10 @@ ROOK_RBD_FEATURES=${ROOK_RBD_FEATURES:-"layering"}
 ROOK_EXTERNAL_MAX_MON_ID=2
 ROOK_EXTERNAL_MAPPING={}
 RBD_STORAGE_CLASS_NAME=ceph-rbd
+RBD_TOPOLOGY_STORAGE_CLASS_NAME=ceph-rbd-topology
 CEPHFS_STORAGE_CLASS_NAME=cephfs
 ROOK_EXTERNAL_MONITOR_SECRET=mon-secret
-OPERATOR_NAMESPACE=rook-ceph # default set to rook-ceph
+OPERATOR_NAMESPACE=rook-ceph # default set to rook-ceph
 CSI_DRIVER_NAME_PREFIX=${CSI_DRIVER_NAME_PREFIX:-$OPERATOR_NAMESPACE}
 RBD_PROVISIONER=$CSI_DRIVER_NAME_PREFIX".rbd.csi.ceph.com" # csi-provisioner-name
 CEPHFS_PROVISIONER=$CSI_DRIVER_NAME_PREFIX".cephfs.csi.ceph.com" # csi-provisioner-name
@@ -298,6 +299,63 @@ eof
   fi
 }
 
+function getTopologyTemplate() {
+  topology=$(
+    cat <<-END
+   {"poolName":"$1",
+   "domainSegments":[
+   {"domainLabel":"$2","value":"$3"}]},
+END
+  )
+}
+
+function createTopology() {
+  TOPOLOGY=""
+  declare -a topology_failure_domain_values_array=()
+  declare -a topology_pools_array=()
+  topology_pools=("$(echo "$TOPOLOGY_POOLS" | tr "," "\n")")
+  for i in ${topology_pools[0]}; do topology_pools_array+=("$i"); done
+  topology_failure_domain_values=("$(echo "$TOPOLOGY_FAILURE_DOMAIN_VALUES" | tr "," "\n")")
+  for i in ${topology_failure_domain_values[0]}; do topology_failure_domain_values_array+=("$i"); done
+  for ((i = 0; i < ${#topology_failure_domain_values_array[@]}; i++)); do
+    getTopologyTemplate "${topology_pools_array[$i]}" "$TOPOLOGY_FAILURE_DOMAIN_LABEL" "${topology_failure_domain_values_array[$i]}"
+    TOPOLOGY="$TOPOLOGY"$'\n'"$topology"
+    topology=""
+  done
+}
+
+function createRBDTopologyStorageClass() {
+  if ! kubectl -n "$NAMESPACE" get storageclass $RBD_TOPOLOGY_STORAGE_CLASS_NAME &>/dev/null; then
+    cat </dev/null; then cat <
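As a companion to the `Create a Topology-Based PVC` section introduced in `topology-for-external-mode.md` above, a hypothetical PVC that consumes the `ceph-rbd-topology` storage class might look like the sketch below. The claim name, namespace, and size are illustrative only; because the class uses `volumeBindingMode: WaitForFirstConsumer`, the volume is provisioned in the zone-local pool only once a pod using the claim is scheduled.

```yaml
# Illustrative PVC for the topology-based storage class (names and size are placeholders).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kafka-data-0
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-rbd-topology
  resources:
    requests:
      storage: 5Gi
```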