From 8fa8612201a153b7979a71ac42bdca6337bdb416 Mon Sep 17 00:00:00 2001
From: sp98
Date: Tue, 27 Aug 2024 10:40:35 +0530
Subject: [PATCH 01/40] core: check for duplicate ceph fs pool names
Only a single pool will be created if there are multiple
data pool entries with the same name. For example, two data
pool entries both named "fs-data0" would silently collapse
into one pool. This commit adds a validation check that
fails if duplicate pool names are present.
Signed-off-by: sp98
---
pkg/operator/ceph/file/filesystem.go | 23 +++++++++++++++++++++++
pkg/operator/ceph/file/filesystem_test.go | 20 ++++++++++++++++++++
2 files changed, 43 insertions(+)
diff --git a/pkg/operator/ceph/file/filesystem.go b/pkg/operator/ceph/file/filesystem.go
index 898875c6b7e6..8a35ab1a0231 100644
--- a/pkg/operator/ceph/file/filesystem.go
+++ b/pkg/operator/ceph/file/filesystem.go
@@ -144,6 +144,14 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust
if len(f.Spec.DataPools) == 0 {
return nil
}
+
+ // Ensure duplicate pool names are not present in the spec.
+ if len(f.Spec.DataPools) > 1 {
+ if hasDuplicatePoolNames(f.Spec.DataPools) {
+ return errors.New("duplicate pool names in the data pool spec")
+ }
+ }
+
if err := cephpool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &f.Spec.MetadataPool); err != nil {
return errors.Wrap(err, "invalid metadata pool")
}
@@ -157,6 +165,21 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust
return nil
}
+func hasDuplicatePoolNames(poolSpecList []cephv1.NamedPoolSpec) bool {
+ poolNames := make(map[string]struct{})
+ for _, poolSpec := range poolSpecList {
+ if poolSpec.Name != "" {
+ if _, has := poolNames[poolSpec.Name]; has {
+ logger.Errorf("duplicate pool name %q in the data pool spec", poolSpec.Name)
+ return true
+ }
+ poolNames[poolSpec.Name] = struct{}{}
+ }
+ }
+
+ return false
+}
+
// newFS creates a new instance of the file (MDS) service
func newFS(name, namespace string) *Filesystem {
return &Filesystem{
diff --git a/pkg/operator/ceph/file/filesystem_test.go b/pkg/operator/ceph/file/filesystem_test.go
index ffe8aaf5dccd..856add3885eb 100644
--- a/pkg/operator/ceph/file/filesystem_test.go
+++ b/pkg/operator/ceph/file/filesystem_test.go
@@ -73,6 +73,26 @@ func TestValidateSpec(t *testing.T) {
assert.Nil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs))
}
+func TestHasDuplicatePoolNames(t *testing.T) {
+ // PoolSpec with no duplicates
+ fs := &cephv1.CephFilesystem{
+ Spec: cephv1.FilesystemSpec{
+ DataPools: []cephv1.NamedPoolSpec{
+ {Name: "pool1"},
+ {Name: "pool2"},
+ },
+ },
+ }
+
+ result := hasDuplicatePoolNames(fs.Spec.DataPools)
+ assert.False(t, result)
+
+ // add duplicate pool name in the spec.
+ fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.NamedPoolSpec{Name: "pool1"})
+ result = hasDuplicatePoolNames(fs.Spec.DataPools)
+ assert.True(t, result)
+}
+
func TestGenerateDataPoolNames(t *testing.T) {
fs := &Filesystem{Name: "fake", Namespace: "fake"}
fsSpec := cephv1.FilesystemSpec{
From 93179a41f37752b9f45621fc56301295c685a87c Mon Sep 17 00:00:00 2001
From: Michael Adam
Date: Wed, 28 Aug 2024 09:58:26 +0200
Subject: [PATCH 02/40] ci: slightly rework the docs-check workflow
This reworks the docs-check CI workflow in several ways:
* It renames the make target 'check-docs' to the more systematic 'check.docs'.
* It adds a 'docs' mode to the file validation script and uses that script in `make check.docs`.
Overall, the workflow and local make targets become more systematic and
consistent with this change.
Signed-off-by: Michael Adam
---
.github/workflows/docs-check.yml | 2 +-
Documentation/Contributing/documentation.md | 2 +-
Makefile | 7 ++-----
tests/scripts/validate_modified_files.sh | 6 +++++-
4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml
index c060ce5d4b74..1a13e842d604 100644
--- a/.github/workflows/docs-check.yml
+++ b/.github/workflows/docs-check.yml
@@ -45,7 +45,7 @@ jobs:
- name: Check helm-docs
run: make check-helm-docs
- name: Check docs
- run: make check-docs
+ run: make check.docs
- name: Install mkdocs and dependencies
run: cd build/release/ && make deps.docs
diff --git a/Documentation/Contributing/documentation.md b/Documentation/Contributing/documentation.md
index ef9e1d370bdb..aa88f6a9e425 100644
--- a/Documentation/Contributing/documentation.md
+++ b/Documentation/Contributing/documentation.md
@@ -35,4 +35,4 @@ When previewing, now you can navigate your browser to [http://127.0.0.1:8000/](h
## Making docs
[helm-docs](https://github.com/norwoodj/helm-docs) is a tool that generates the documentation for a helm chart automatically. If there are changes in the helm chart, the developer needs to run `make docs` (to run helm-docs) and check in the resulting autogenerated files.
-To make it easy to check locally for uncommitted changes generated by `make docs`, an additional `make` target exists: simply running `make check-docs` will run the docs auto-generation and will complain if this produces uncommitted changes to doc files. It is therefore a good habit to always run `make check-docs` locally before creating or updating a PR.
+To make it easy to check locally for uncommitted changes generated by `make docs`, an additional `make` target exists: simply running `make check.docs` will run the docs auto-generation and will complain if this produces uncommitted changes to doc files. It is therefore a good habit to always run `make check.docs` locally before creating or updating a PR.
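+
+For example, a typical local sequence before creating or updating a PR is:
+
+```console
+make docs        # regenerate the helm chart docs
+make check.docs  # fails if the regeneration produced uncommitted changes
+```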
diff --git a/Makefile b/Makefile
index 29d1c24839eb..825664cf7937 100644
--- a/Makefile
+++ b/Makefile
@@ -207,12 +207,9 @@ check-helm-docs:
echo "Please run 'make helm-docs' locally, commit the updated docs, and push the change. See https://rook.io/docs/rook/latest/Contributing/documentation/#making-docs" ; \
exit 2 ; \
};
-check-docs:
+check.docs:
@$(MAKE) docs
- @git diff --exit-code || { \
- echo "Please run 'make docs' locally, commit the updated docs, and push the change." ; \
- exit 2 ; \
- };
+ @tests/scripts/validate_modified_files.sh docs
docs-preview: ## Preview the documentation through mkdocs
diff --git a/tests/scripts/validate_modified_files.sh b/tests/scripts/validate_modified_files.sh
index 1b51d81ca9d7..27cdb13f7279 100755
--- a/tests/scripts/validate_modified_files.sh
+++ b/tests/scripts/validate_modified_files.sh
@@ -9,6 +9,7 @@ MOD_ERR="changes found by mod.check. You may need to run make clean"
CRD_ERR="changes found by 'make crds'. please run 'make crds' locally and update your PR"
BUILD_ERR="changes found by 'make build', please commit your go.sum or other changed files"
HELM_ERR="changes found by 'make gen-rbac'. please run 'make gen-rbac' locally and update your PR"
+DOCS_ERR="changes found by 'make docs'. please run 'make docs' locally and update your PR"
#############
# FUNCTIONS #
@@ -29,6 +30,9 @@ function validate(){
# MAIN #
########
case "$1" in
+ docs)
+ validate "$DOCS_ERR"
+ ;;
codegen)
validate "$CODEGEN_ERR"
;;
@@ -45,6 +49,6 @@ case "$1" in
validate "$HELM_ERR"
;;
*)
- echo $"Usage: $0 {codegen|modcheck|crd|build|gen-rbac}"
+ echo $"Usage: $0 {docs|codegen|modcheck|crd|build|gen-rbac}"
exit 1
esac
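
Note: the validate() helper used above is defined earlier in
tests/scripts/validate_modified_files.sh and is not part of this diff. As a
rough sketch (not the verbatim implementation), it amounts to failing when
the work tree is dirty and printing the given message:

    validate() {
        # $1 - error message to print when uncommitted changes are found
        if ! git diff --quiet; then
            git diff --stat
            echo "$1"
            exit 1
        fi
    }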
From 59175f0b408cbeae5531927a9cb9e760034bb221 Mon Sep 17 00:00:00 2001
From: Artem Torubarov
Date: Fri, 6 Sep 2024 16:02:53 +0200
Subject: [PATCH 03/40] rgw: pool placement
Signed-off-by: Artem Torubarov
---
Documentation/CRDs/specification.md | 146 ++
.../Object-Storage-RGW/object-storage.md | 82 +-
.../charts/rook-ceph/templates/resources.yaml | 132 +-
deploy/examples/crds.yaml | 132 +-
pkg/apis/ceph.rook.io/v1/types.go | 68 +-
pkg/operator/ceph/object/admin.go | 12 +-
pkg/operator/ceph/object/controller.go | 13 +-
pkg/operator/ceph/object/json_helpers.go | 125 ++
pkg/operator/ceph/object/json_helpers_test.go | 509 +++++
pkg/operator/ceph/object/objectstore.go | 403 ++--
pkg/operator/ceph/object/objectstore_test.go | 815 ++++++--
pkg/operator/ceph/object/shared_pools.go | 510 +++++
pkg/operator/ceph/object/shared_pools_test.go | 1803 +++++++++++++++++
pkg/operator/ceph/object/zone/controller.go | 26 +-
14 files changed, 4421 insertions(+), 355 deletions(-)
create mode 100644 pkg/operator/ceph/object/json_helpers.go
create mode 100644 pkg/operator/ceph/object/json_helpers_test.go
create mode 100644 pkg/operator/ceph/object/shared_pools.go
create mode 100644 pkg/operator/ceph/object/shared_pools_test.go
diff --git a/Documentation/CRDs/specification.md b/Documentation/CRDs/specification.md
index 4e796082477e..662c8575c97f 100644
--- a/Documentation/CRDs/specification.md
+++ b/Documentation/CRDs/specification.md
@@ -9418,6 +9418,7 @@ string
+(Optional)
The metadata pool used for creating RADOS namespaces in the object store
@@ -9429,6 +9430,7 @@ string
+(Optional)
The data pool used for creating RADOS namespaces in the object store
@@ -9444,6 +9446,28 @@ bool
Whether the RADOS namespaces should be preserved on deletion of the object store
+
+poolPlacements ([]PoolPlacementSpec)
+
+(Optional)
+PoolPlacements control which Pools are associated with a particular RGW bucket.
+Once PoolPlacements are defined, RGW client will be able to associate pool
+with ObjectStore bucket by providing “<LocationConstraint>” during s3 bucket creation
+or “X-Storage-Policy” header during swift container creation.
+See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+PoolPlacement with name: “default” will be used as a default pool if no option
+is provided during bucket creation.
+If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+
ObjectStoreHostingSpec
@@ -10624,6 +10648,49 @@ the triple using the matching operator
PlacementSpec is the placement for core ceph daemons part of the CephCluster CRD
+PlacementStorageClassSpec
+
+(Appears on: PoolPlacementSpec)
+
+name (string)
+Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+however most clients/libs insist on AWS names so it is recommended to use
+one of the valid x-amz-storage-class values for better compatibility:
+REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+
+dataPoolName (string)
+DataPoolName is the data pool used to store ObjectStore objects data.
+
PoolMirroringInfo
@@ -10780,6 +10847,85 @@ StatesSpec
+
PoolPlacementSpec
+
+(Appears on: ObjectSharedPoolsSpec)
+
+name (string)
+Pool placement name. Name can be arbitrary. Placement with name “default” will be used as default.
+
+metadataPoolName (string)
+The metadata pool used to store ObjectStore bucket index.
+
+dataPoolName (string)
+The data pool used to store ObjectStore objects data.
+
+dataNonECPoolName (string)
+(Optional)
+The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+
+storageClasses ([]PlacementStorageClassSpec)
+(Optional)
+StorageClasses can be selected by user to override dataPoolName during object creation.
+Each placement has default STANDARD StorageClass pointing to dataPoolName.
+This list allows defining additional StorageClasses on top of default STANDARD storage class.
+
PoolSpec
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
index 50bfcd176a20..4c291a0c2409 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
@@ -14,8 +14,9 @@ Rook can configure the Ceph Object Store for several different scenarios. See ea
1. Create a [local object store](#create-a-local-object-store-with-s3) with dedicated Ceph pools. This option is recommended if a single object store is required, and is the simplest to get started.
2. Create [one or more object stores with shared Ceph pools](#create-local-object-stores-with-shared-pools). This option is recommended when multiple object stores are required.
-3. Connect to an [RGW service in an external Ceph cluster](#connect-to-an-external-object-store), rather than create a local object store.
-4. Configure [RGW Multisite](#object-multisite) to synchronize buckets between object stores in different clusters.
+3. Create [one or more object stores with pool placement targets and storage classes](#create-local-object-stores-with-pool-placements). This configuration allows Rook to provide different object placement options to object store clients.
+4. Connect to an [RGW service in an external Ceph cluster](#connect-to-an-external-object-store), rather than create a local object store.
+5. Configure [RGW Multisite](#object-multisite) to synchronize buckets between object stores in different clusters.
!!! note
Updating the configuration of an object store between these types is not supported.
@@ -188,6 +189,83 @@ To consume the object store, continue below in the section to [Create a bucket](
Modify the default example object store name from `my-store` to the alternate name of the object store
such as `store-a` in this example.
+### Create Local Object Store(s) with pool placements
+
+!!! attention
+ This feature is experimental.
+
+This section explains how to configure [RGW's pool placement and storage classes](https://docs.ceph.com/en/reef/radosgw/placement/) with Rook.
+
+The Object Storage API allows users to override where bucket data will be stored during bucket creation, using the `LocationConstraint` parameter in the S3 API or the `X-Storage-Policy` header in SWIFT. Similarly, users can override where object data will be stored by setting the `X-Amz-Storage-Class` and `X-Object-Storage-Class` headers during object creation.
+
+To enable this feature, configure `poolPlacements` representing a list of possible bucket data locations.
+Each `poolPlacement` must have:
+
+* a **unique** `name` to refer to it in `LocationConstraint` or `X-Storage-Policy`. A placement with the reserved name `default` will be used by default if no location constraint is provided.
+* `dataPoolName` and `metadataPoolName` representing object data and metadata locations. In Rook, these data locations are backed by `CephBlockPool`. The `poolPlacements` and `storageClasses` specs refer to pools by name, which means that all pools should be defined in advance. Similarly to [sharedPools](#create-local-object-stores-with-shared-pools), the same pool can be reused across multiple ObjectStores and/or poolPlacements/storageClasses because of RADOS namespaces. Here, each pool will be namespaced with a key composed of the object store name, the placement name, and the pool type.
+* **optional** `dataNonECPoolName` - extra pool for data that cannot use erasure coding (ex: multi-part uploads). If not set, `metadataPoolName` will be used.
+* **optional** list of placement `storageClasses`. Classes are defined per placement, which means that even classes of the `default` placement will be available only within that placement and not in others. Each placement automatically gets a default storage class named `STANDARD`. The `STANDARD` class always points to the placement's `dataPoolName` and cannot be removed or redefined. Each storage class must have:
+ * `name` (unique within placement). RGW allows arbitrary name for StorageClasses, however some clients/libs insist on AWS names so it is recommended to use one of the valid `x-amz-storage-class` values for better compatibility: `STANDARD | REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE`. See [AWS docs](https://aws.amazon.com/s3/storage-classes/).
+ * `dataPoolName` - overrides placement data pool when this class is selected by user.
+
+Example: Configure `CephObjectStore` with `default` placement pointing to `us` pools and placement `europe` pointing to pools in corresponding geographies. These geographical locations are only an example. Placement name can be arbitrary and could reflect the backing pool's replication factor, device class, or failure domain. This example also defines storage class `REDUCED_REDUNDANCY` for each placement.
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: my-store
+  namespace: rook-ceph
+spec:
+  gateway:
+    port: 80
+    instances: 1
+  sharedPools:
+    poolPlacements:
+      - name: default
+        metadataPoolName: "us-meta-pool"
+        dataPoolName: "us-data-pool"
+        storageClasses:
+          - name: REDUCED_REDUNDANCY
+            dataPoolName: "us-reduced-pool"
+      - name: europe
+        metadataPoolName: "eu-meta-pool"
+        dataPoolName: "eu-data-pool"
+        storageClasses:
+          - name: REDUCED_REDUNDANCY
+            dataPoolName: "eu-reduced-pool"
+```
+
+S3 clients can direct objects into the pools defined above. The example below uses the [s5cmd](https://github.com/peak/s5cmd) CLI tool, which is pre-installed in the toolbox pod:
+
+```shell
+# make bucket without location constraint -> will use the "default" placement (us pools)
+s5cmd mb s3://bucket1
+
+# put object to bucket1 without storage class -> end up in "us-data-pool"
+s5cmd put obj s3://bucket1/obj
+
+# put object to bucket1 with "STANDARD" storage class -> end up in "us-data-pool"
+s5cmd put obj s3://bucket1/obj --storage-class=STANDARD
+
+# put object to bucket1 with "REDUCED_REDUNDANCY" storage class -> end up in "us-reduced-pool"
+s5cmd put obj s3://bucket1/obj --storage-class=REDUCED_REDUNDANCY
+
+
+# make bucket with location constraint europe
+s5cmd mb s3://bucket2 --region=my-store:europe
+
+# put object to bucket2 without storage class -> end up in "eu-data-pool"
+s5cmd put obj s3://bucket2/obj
+
+# put object to bucket2 with "STANDARD" storage class -> end up in "eu-data-pool"
+s5cmd put obj s3://bucket2/obj --storage-class=STANDARD
+
+# put object to bucket2 with "REDUCED_REDUNDANCY" storage class -> end up in "eu-reduced-pool"
+s5cmd put obj s3://bucket2/obj --storage-class=REDUCED_REDUNDANCY
+
+```
+
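+To sanity-check the resulting configuration, the placement targets and storage classes that RGW registered can be listed from the toolbox pod. This assumes the Rook-managed zone and zonegroup share the object store's name (`my-store` here):
+
+```shell
+radosgw-admin zonegroup placement list --rgw-zonegroup=my-store
+radosgw-admin zone placement list --rgw-zone=my-store
+```
+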
### Connect to an External Object Store
Rook can connect to existing RGW gateways to work in conjunction with the external mode of the `CephCluster` CRD. First, create a `rgw-admin-ops-user` user in the Ceph cluster with the necessary caps:
diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml
index e7954efe7b9e..767e08498461 100644
--- a/deploy/charts/rook-ceph/templates/resources.yaml
+++ b/deploy/charts/rook-ceph/templates/resources.yaml
@@ -12316,12 +12316,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zone:
description: The multisite info
@@ -13178,12 +13238,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zoneGroup:
description: The display name for the ceph users
diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml
index 5978262293d4..9f7a3d225655 100644
--- a/deploy/examples/crds.yaml
+++ b/deploy/examples/crds.yaml
@@ -12307,12 +12307,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zone:
description: The multisite info
@@ -13166,12 +13226,72 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+ poolPlacements:
+ description: |-
+ PoolPlacements control which Pools are associated with a particular RGW bucket.
+ Once PoolPlacements are defined, RGW client will be able to associate pool
+ with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+ or "X-Storage-Policy" header during swift container creation.
+ See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ PoolPlacement with name: "default" will be used as a default pool if no option
+ is provided during bucket creation.
+ If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ items:
+ properties:
+ dataNonECPoolName:
+ description: |-
+ The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ type: string
+ dataPoolName:
+ description: The data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ metadataPoolName:
+ description: The metadata pool used to store ObjectStore bucket index.
+ minLength: 1
+ type: string
+ name:
+ description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ storageClasses:
+ description: |-
+ StorageClasses can be selected by user to override dataPoolName during object creation.
+ Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ items:
+ properties:
+ dataPoolName:
+ description: DataPoolName is the data pool used to store ObjectStore objects data.
+ minLength: 1
+ type: string
+ name:
+ description: |-
+ Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ however most clients/libs insist on AWS names so it is recommended to use
+ one of the valid x-amz-storage-class values for better compatibility:
+ REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ minLength: 1
+ pattern: ^[a-zA-Z0-9._/-]+$
+ type: string
+ required:
+ - dataPoolName
+ - name
+ type: object
+ type: array
+ required:
+ - dataPoolName
+ - metadataPoolName
+ - name
+ type: object
+ type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
- required:
- - dataPoolName
- - metadataPoolName
type: object
zoneGroup:
description: The display name for the ceph users
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go
index 380c1f3cd23d..c817ac41ac73 100755
--- a/pkg/apis/ceph.rook.io/v1/types.go
+++ b/pkg/apis/ceph.rook.io/v1/types.go
@@ -1517,15 +1517,75 @@ type ObjectStoreSpec struct {
type ObjectSharedPoolsSpec struct {
// The metadata pool used for creating RADOS namespaces in the object store
// +kubebuilder:validation:XValidation:message="object store shared metadata pool is immutable",rule="self == oldSelf"
- MetadataPoolName string `json:"metadataPoolName"`
+ // +optional
+ MetadataPoolName string `json:"metadataPoolName,omitempty"`
// The data pool used for creating RADOS namespaces in the object store
// +kubebuilder:validation:XValidation:message="object store shared data pool is immutable",rule="self == oldSelf"
- DataPoolName string `json:"dataPoolName"`
+ // +optional
+ DataPoolName string `json:"dataPoolName,omitempty"`
// Whether the RADOS namespaces should be preserved on deletion of the object store
// +optional
PreserveRadosNamespaceDataOnDelete bool `json:"preserveRadosNamespaceDataOnDelete"`
+
+ // PoolPlacements control which Pools are associated with a particular RGW bucket.
+ // Once PoolPlacements are defined, RGW client will be able to associate pool
+ // with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+ // or "X-Storage-Policy" header during swift container creation.
+ // See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+ // PoolPlacement with name: "default" will be used as a default pool if no option
+ // is provided during bucket creation.
+ // If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+ // If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+ // +optional
+ PoolPlacements []PoolPlacementSpec `json:"poolPlacements,omitempty"`
+}
+
+type PoolPlacementSpec struct {
+ // Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9._/-]+$`
+ Name string `json:"name"`
+
+ // The metadata pool used to store ObjectStore bucket index.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ MetadataPoolName string `json:"metadataPoolName"`
+
+ // The data pool used to store ObjectStore objects data.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ DataPoolName string `json:"dataPoolName"`
+
+ // The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+ // If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+ // +optional
+ DataNonECPoolName string `json:"dataNonECPoolName,omitempty"`
+
+ // StorageClasses can be selected by user to override dataPoolName during object creation.
+ // Each placement has default STANDARD StorageClass pointing to dataPoolName.
+ // This list allows defining additional StorageClasses on top of default STANDARD storage class.
+ // +optional
+ StorageClasses []PlacementStorageClassSpec `json:"storageClasses,omitempty"`
+}
+
+type PlacementStorageClassSpec struct {
+ // Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+ // however most clients/libs insist on AWS names so it is recommended to use
+ // one of the valid x-amz-storage-class values for better compatibility:
+ // REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+ // See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9._/-]+$`
+ Name string `json:"name"`
+
+ // DataPoolName is the data pool used to store ObjectStore objects data.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ DataPoolName string `json:"dataPoolName"`
}
// ObjectHealthCheckSpec represents the health check of an object store
@@ -2019,7 +2079,7 @@ type CephObjectZoneGroupList struct {
// ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup
type ObjectZoneGroupSpec struct {
- //The display name for the ceph users
+ // The display name for the ceph users
Realm string `json:"realm"`
}
@@ -2050,7 +2110,7 @@ type CephObjectZoneList struct {
// ObjectZoneSpec represent the spec of an ObjectZone
type ObjectZoneSpec struct {
- //The display name for the ceph users
+ // The display name for the ceph users
ZoneGroup string `json:"zoneGroup"`
// The metadata pool settings
diff --git a/pkg/operator/ceph/object/admin.go b/pkg/operator/ceph/object/admin.go
index 5e5a2596f515..67ab074a3b84 100644
--- a/pkg/operator/ceph/object/admin.go
+++ b/pkg/operator/ceph/object/admin.go
@@ -48,6 +48,14 @@ type Context struct {
Zone string
}
+func (c *Context) nsName() string {
+ if c.clusterInfo == nil {
+ logger.Infof("unable to get namespaced name for rgw %s", c.Name)
+ return c.Name
+ }
+ return fmt.Sprintf("%s/%s", c.clusterInfo.Namespace, c.Name)
+}
+
// AdminOpsContext holds the object store context as well as information for connecting to the admin
// ops API.
type AdminOpsContext struct {
@@ -101,9 +109,7 @@ const (
rgwAdminOpsUserCaps = "buckets=*;users=*;usage=read;metadata=read;zone=read"
)
-var (
- rgwAdminOpsUserDisplayName = "RGW Admin Ops User"
-)
+var rgwAdminOpsUserDisplayName = "RGW Admin Ops User"
// NewContext creates a new object store context.
func NewContext(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, name string) *Context {
diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go
index 651978ec9204..82e8b128c820 100644
--- a/pkg/operator/ceph/object/controller.go
+++ b/pkg/operator/ceph/object/controller.go
@@ -441,12 +441,19 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c
return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to set endpoint", err)
}
+ err = ValidateObjectStorePoolsConfig(cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool, cephObjectStore.Spec.SharedPools)
+ if err != nil {
+ return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "invalid pool configuration", err)
+ }
// Reconcile Pool Creation
if !cephObjectStore.Spec.IsMultisite() {
logger.Info("reconciling object store pools")
- err = ConfigurePools(objContext, r.clusterSpec, cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool, cephObjectStore.Spec.SharedPools)
- if err != nil {
- return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to create object pools", err)
+
+ if IsNeedToCreateObjectStorePools(cephObjectStore.Spec.SharedPools) {
+ err = CreateObjectStorePools(objContext, r.clusterSpec, cephObjectStore.Spec.MetadataPool, cephObjectStore.Spec.DataPool)
+ if err != nil {
+ return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to create object pools", err)
+ }
}
}
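
The helpers referenced above are defined in objectstore.go and are not shown
in this hunk. As a rough sketch of the intended semantics (hypothetical and
simplified, not the verbatim helper):

    // Dedicated object store pools are only created when the spec does not
    // point RGW at shared pools or pool placements.
    func IsNeedToCreateObjectStorePools(sharedPools cephv1.ObjectSharedPoolsSpec) bool {
        return sharedPools.MetadataPoolName == "" &&
            sharedPools.DataPoolName == "" &&
            len(sharedPools.PoolPlacements) == 0
    }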
diff --git a/pkg/operator/ceph/object/json_helpers.go b/pkg/operator/ceph/object/json_helpers.go
new file mode 100644
index 000000000000..719339e7db2a
--- /dev/null
+++ b/pkg/operator/ceph/object/json_helpers.go
@@ -0,0 +1,125 @@
+package object
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// getObjProperty - helper function to manipulate JSON Objects.
+// returns nested property of json object.
+// Example:
+//
+// obj = {"a":{"b":"foo"}}
+// // will return "foo"
+// getObjProperty(obj,"a","b")
+func getObjProperty[T string | map[string]interface{} | []interface{}](obj map[string]interface{}, path ...string) (T, error) {
+ var res T
+ if len(path) == 0 {
+ return res, fmt.Errorf("json property path is empty")
+ }
+
+ for i, p := range path {
+ val, ok := obj[p]
+ if !ok {
+ return res, fmt.Errorf("json property %q not found", strings.Join(path[:i+1], "."))
+ }
+ last := i == len(path)-1
+ if last {
+ // last path segment: get result
+ res, ok = val.(T)
+ if !ok {
+ return res, fmt.Errorf("json property %q is not a %T, got %+v", strings.Join(path, "."), res, val)
+ }
+ return res, nil
+ }
+ // walk to the next obj in the path
+ obj, ok = val.(map[string]interface{})
+ if !ok {
+ return res, fmt.Errorf("json property %q is not an object, got %+v", strings.Join(path[:i+1], "."), val)
+ }
+ }
+ // not reachable
+ return res, fmt.Errorf("json property %q not found", strings.Join(path, "."))
+}
+
+// setObjProperty - helper function to manipulate JSON Objects.
+// sets value to json object nested field and returns previous value if presented.
+// Example:
+//
+// obj = {"a":{"b":"foo"}}
+// // will replace "foo" with "bar" and return "foo"
+// setObjProperty(obj,"bar","a","b")
+func setObjProperty[T string | []string | map[string]interface{} | []interface{}](obj map[string]interface{}, val T, path ...string) (T, error) {
+ var prev T
+ if len(path) == 0 {
+ return prev, fmt.Errorf("json property path is empty")
+ }
+ for i, p := range path {
+ last := i == len(path)-1
+ if last {
+ // last path segment: set result and return prev value
+ prevVal, ok := obj[p]
+ if ok {
+ prevRes, ok := prevVal.(T)
+ if ok {
+ prev = prevRes
+ } else {
+ // in go json all arrays are []interface{}, extra conversion for typed arrays (e.g. []string) needed:
+ p := new(T)
+ if castJson(prevVal, p) {
+ prev = *p
+ }
+ }
+ }
+ obj[p] = val
+ return prev, nil
+ }
+ // walk to the next obj in the path
+ next, ok := obj[p]
+ if !ok {
+ return prev, fmt.Errorf("json property %q is not found", strings.Join(path[:i+1], "."))
+ }
+ obj, ok = next.(map[string]interface{})
+ if !ok {
+ return prev, fmt.Errorf("json property %q is not an object, got %+v", strings.Join(path[:i+1], "."), next)
+ }
+ }
+ // not reachable
+ return prev, fmt.Errorf("json property %q not found", strings.Join(path, "."))
+}
+
+// castJson - helper function to manipulate JSON Objects.
+// Tries to cast any type to any type by converting to JSON and back.
+// Returns true on success.
+func castJson(in, out interface{}) bool {
+ bytes, err := json.Marshal(in)
+ if err != nil {
+ return false
+ }
+ err = json.Unmarshal(bytes, out)
+ return err == nil
+}
+
+// toObj - helper function to manipulate JSON Objects.
+// Casts any go struct to map representing JSON object.
+func toObj(val interface{}) (map[string]interface{}, error) {
+ bytes, err := json.Marshal(val)
+ if err != nil {
+ return nil, err
+ }
+ obj := map[string]interface{}{}
+ return obj, json.Unmarshal(bytes, &obj)
+}
+
+// deepCopyJson - helper function to manipulate JSON Objects.
+// Makes deep copy of json object by converting to JSON and back.
+func deepCopyJson(in map[string]interface{}) (map[string]interface{}, error) {
+ bytes, err := json.Marshal(in)
+ if err != nil {
+ return nil, err
+ }
+ res := map[string]interface{}{}
+ err = json.Unmarshal(bytes, &res)
+ return res, err
+}
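
To make the generics concrete, here is a minimal usage sketch in the same
package (illustration only, not part of the patch):

    // read a nested zone property, then replace it in place
    zone := map[string]interface{}{}
    _ = json.Unmarshal([]byte(`{"name":"my-store","domain_root":"meta-pool:my-store.meta.root"}`), &zone)

    // the explicit type parameter selects the expected JSON type
    root, _ := getObjProperty[string](zone, "domain_root")
    // setObjProperty returns the previous value, so prev == root here
    prev, _ := setObjProperty(zone, "other-pool:my-store.meta.root", "domain_root")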
diff --git a/pkg/operator/ceph/object/json_helpers_test.go b/pkg/operator/ceph/object/json_helpers_test.go
new file mode 100644
index 000000000000..9448c107067c
--- /dev/null
+++ b/pkg/operator/ceph/object/json_helpers_test.go
@@ -0,0 +1,509 @@
+package object
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_getObjPropertyStr(t *testing.T) {
+ type args struct {
+ json string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ wantErr bool
+ }{
+ {
+ name: "success",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "val",
+ wantErr: false,
+ },
+ {
+ name: "success: empty str",
+ args: args{
+ json: `{"a":{"b":""}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: false,
+ },
+ {
+ name: "err: empty json",
+ args: args{
+ json: `{}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is obj",
+ args: args{
+ json: `{"a":{"b":{"val":"val"}}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is arr",
+ args: args{
+ json: `{"a":{"b":["val1","val2"]}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is bool",
+ args: args{
+ json: `{"a":{"b":true}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is num",
+ args: args{
+ json: `{"a":{"b":5}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ {
+ name: "err: is missing",
+ args: args{
+ json: `{"a":{"c":"val"}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: "",
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ _ = json.Unmarshal([]byte(tt.args.json), &obj)
+ got, err := getObjProperty[string](obj, tt.args.path...)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getObjProperty() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("getObjProperty() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_getObjPropertyObjArr(t *testing.T) {
+ type args struct {
+ json string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []interface{}
+ wantErr bool
+ }{
+ {
+ name: "success",
+ args: args{
+ json: `{"a":{"b":[
+ {"c":"val1"},
+ {"d":"val2"}
+ ]}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ want: []interface{}{
+ map[string]interface{}{"c": "val1"},
+ map[string]interface{}{"d": "val2"},
+ },
+ wantErr: false,
+ },
+ {
+ name: "err: empty json",
+ args: args{
+ json: `{}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is obj",
+ args: args{
+ json: `{"a":{"b":{"val":"val"}}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is bool",
+ args: args{
+ json: `{"a":{"b":true}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is num",
+ args: args{
+ json: `{"a":{"b":5}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "err: is missing",
+ args: args{
+ json: `{"a":{"c":"val"}}`,
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ _ = json.Unmarshal([]byte(tt.args.json), &obj)
+ got, err := getObjProperty[[]interface{}](obj, tt.args.path...)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getObjProperty() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("getObjProperty() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_setObjProperty(t *testing.T) {
+ type args struct {
+ json string
+ val string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev string
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "replace val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "new val",
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: "val",
+ wantJSON: `{"a":{"b":"new val"}}`,
+ wantErr: false,
+ },
+ {
+ name: "same val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "val",
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: "val",
+ wantJSON: `{"a":{"b":"val"}}`,
+ wantErr: false,
+ },
+ {
+ name: "add val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "val2",
+ path: []string{
+ "a", "c",
+ },
+ },
+ wantPrev: "",
+ wantJSON: `{"a":{"b":"val","c":"val2"}}`,
+ wantErr: false,
+ },
+ {
+ name: "add root val",
+ args: args{
+ json: `{"a":{"b":"val"}}`,
+ val: "val2",
+ path: []string{
+ "c",
+ },
+ },
+ wantPrev: "",
+ wantJSON: `{"a":{"b":"val"},"c":"val2"}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+func Test_setObjPropertyObj(t *testing.T) {
+ type args struct {
+ json string
+ val map[string]interface{}
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev map[string]interface{}
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "add obj",
+ args: args{
+ json: `{"a":{"b":{}}}`,
+ val: map[string]interface{}{"c": "val1"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: map[string]interface{}{},
+ wantJSON: `{"a":{"b":{"c":"val1"}}}`,
+ wantErr: false,
+ },
+ {
+ name: "set obj",
+ args: args{
+ json: `{"a":{"b":{"c": "val1"}}}`,
+ val: map[string]interface{}{"d": "val2"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: map[string]interface{}{"c": "val1"},
+ wantJSON: `{"a":{"b":{"d":"val2"}}}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+
+func Test_setObjPropertyArr(t *testing.T) {
+ type args struct {
+ json string
+ val []interface{}
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev []interface{}
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "set obj arr",
+ args: args{
+ json: `{"a":{"b":{}}}`,
+ val: []interface{}{
+ map[string]interface{}{"c": "val1"},
+ map[string]interface{}{"d": "val2"},
+ },
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: nil,
+ wantJSON: `{"a":{"b":[{"c":"val1"},{"d":"val2"}]}}`,
+ wantErr: false,
+ },
+ {
+ name: "add obj arr",
+ args: args{
+ json: `{"a":{"b":[{"c": "val"}]}}`,
+ val: []interface{}{
+ map[string]interface{}{"d": "val1"},
+ map[string]interface{}{"e": "val2"},
+ },
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: []interface{}{
+ map[string]interface{}{"c": "val"},
+ },
+ wantJSON: `{"a":{"b":[{"d":"val1"},{"e":"val2"}]}}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+func Test_setObjPropertyStrArr(t *testing.T) {
+ type args struct {
+ json string
+ val []string
+ path []string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPrev []string
+ wantJSON string
+ wantErr bool
+ }{
+ {
+ name: "add str arr",
+ args: args{
+ json: `{"a":{"b":{}}}`,
+ val: []string{"c", "d"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: nil,
+ wantJSON: `{"a":{"b":["c","d"]}}`,
+ wantErr: false,
+ },
+ {
+ name: "set str arr",
+ args: args{
+ json: `{"a":{"b":["val"]}}`,
+ val: []string{"c", "d"},
+ path: []string{
+ "a", "b",
+ },
+ },
+ wantPrev: []string{"val"},
+ wantJSON: `{"a":{"b":["c","d"]}}`,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ obj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.json), &obj)
+ assert.NoError(t, err)
+ prev, err := setObjProperty(obj, tt.args.val, tt.args.path...)
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.EqualValues(t, tt.wantPrev, prev)
+ bytes, err := json.Marshal(obj)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+
+func Test_deepCopyJson(t *testing.T) {
+ in := map[string]interface{}{
+ "key": []interface{}{"1", "2", "3"},
+ }
+ inCopy, err := deepCopyJson(in)
+ assert.NoError(t, err)
+ assert.EqualValues(t, in, inCopy)
+
+ assert.EqualValues(t, []interface{}{"1", "2", "3"}, in["key"])
+ assert.EqualValues(t, []interface{}{"1", "2", "3"}, inCopy["key"])
+
+ inCopy["key"].([]interface{})[1] = "7"
+
+ assert.EqualValues(t, []interface{}{"1", "2", "3"}, in["key"])
+ assert.EqualValues(t, []interface{}{"1", "7", "3"}, inCopy["key"])
+}
diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go
index bf194f4f40e4..794419ac8498 100644
--- a/pkg/operator/ceph/object/objectstore.go
+++ b/pkg/operator/ceph/object/objectstore.go
@@ -21,7 +21,7 @@ import (
"encoding/json"
"fmt"
"os"
- "path"
+ "reflect"
"sort"
"strconv"
"strings"
@@ -209,7 +209,6 @@ func deleteSingleSiteRealmAndPools(objContext *Context, spec cephv1.ObjectStoreS
// This is used for quickly getting the name of the realm, zone group, and zone for an object-store to pass into a Context
func getMultisiteForObjectStore(ctx context.Context, clusterdContext *clusterd.Context, spec *cephv1.ObjectStoreSpec, namespace, name string) (string, string, string, error) {
-
if spec.IsExternal() {
// In https://github.com/rook/rook/issues/6342, it was determined that
// a multisite context isn't needed for external mode CephObjectStores.
@@ -749,16 +748,8 @@ func missingPools(context *Context) ([]string, error) {
return missingPools, nil
}
-func ConfigurePools(context *Context, cluster *cephv1.ClusterSpec, metadataPool, dataPool cephv1.PoolSpec, sharedPools cephv1.ObjectSharedPoolsSpec) error {
- if sharedPoolsSpecified(sharedPools) {
- if !EmptyPool(dataPool) || !EmptyPool(metadataPool) {
- return fmt.Errorf("object store shared pools can only be specified if the metadata and data pools are not specified")
- }
- // Shared pools are configured elsewhere
- return nil
- }
-
- if EmptyPool(dataPool) && EmptyPool(metadataPool) {
+func CreateObjectStorePools(context *Context, cluster *cephv1.ClusterSpec, metadataPool, dataPool cephv1.PoolSpec) error {
+ if EmptyPool(dataPool) || EmptyPool(metadataPool) {
logger.Info("no pools specified for the CR, checking for their existence...")
missingPools, err := missingPools(context)
if err != nil {
@@ -793,77 +784,60 @@ func ConfigurePools(context *Context, cluster *cephv1.ClusterSpec, metadataPool,
return nil
}
-func sharedPoolsSpecified(sharedPools cephv1.ObjectSharedPoolsSpec) bool {
- return sharedPools.DataPoolName != "" && sharedPools.MetadataPoolName != ""
-}
-
func ConfigureSharedPoolsForZone(objContext *Context, sharedPools cephv1.ObjectSharedPoolsSpec) error {
- if !sharedPoolsSpecified(sharedPools) {
- logger.Debugf("no shared pools to configure for store %q", objContext.Name)
+ if sharedPools.DataPoolName == "" && sharedPools.MetadataPoolName == "" && len(sharedPools.PoolPlacements) == 0 {
+ logger.Debugf("no shared pools to configure for store %q", objContext.nsName())
return nil
}
+ logger.Infof("configuring shared pools for object store %q", objContext.nsName())
if err := sharedPoolsExist(objContext, sharedPools); err != nil {
return errors.Wrapf(err, "object store cannot be configured until shared pools exist")
}
- // retrieve the zone config
- logger.Infof("Retrieving zone %q", objContext.Zone)
- realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
- zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
- zoneArg := "--rgw-zone=" + objContext.Zone
- args := []string{"zone", "get", realmArg, zoneGroupArg, zoneArg}
-
- output, err := RunAdminCommandNoMultisite(objContext, true, args...)
+ zoneConfig, err := getZoneJSON(objContext)
if err != nil {
- return errors.Wrap(err, "failed to get zone")
+ return err
}
-
- logger.Debugf("Zone config is currently:\n%s", output)
-
- var zoneConfig map[string]interface{}
- err = json.Unmarshal([]byte(output), &zoneConfig)
+ zoneUpdated, err := adjustZoneDefaultPools(zoneConfig, sharedPools)
if err != nil {
- return errors.Wrap(err, "failed to unmarshal zone")
- }
-
- metadataPrefix := fmt.Sprintf("%s:%s.", sharedPools.MetadataPoolName, objContext.Name)
- dataPrefix := fmt.Sprintf("%s:%s.", sharedPools.DataPoolName, objContext.Name)
- expectedDataPool := dataPrefix + "buckets.data"
- if dataPoolIsExpected(objContext, zoneConfig, expectedDataPool) {
- logger.Debugf("Data pool already set as expected to %q", expectedDataPool)
- return nil
- }
-
- logger.Infof("Updating rados namespace configuration for zone %q", objContext.Zone)
- if err := applyExpectedRadosNamespaceSettings(zoneConfig, metadataPrefix, dataPrefix, expectedDataPool); err != nil {
- return errors.Wrap(err, "failed to configure rados namespaces")
+ return err
}
-
- configBytes, err := json.Marshal(zoneConfig)
+ zoneUpdated, err = adjustZonePlacementPools(zoneUpdated, sharedPools)
if err != nil {
- return errors.Wrap(err, "failed to serialize zone config")
+ return err
}
- logger.Debugf("Raw zone settings to apply: %s", string(configBytes))
+ hasZoneChanged := !reflect.DeepEqual(zoneConfig, zoneUpdated)
- configFilename := path.Join(objContext.Context.ConfigDir, objContext.Name+".zonecfg")
- if err := os.WriteFile(configFilename, configBytes, 0600); err != nil {
- return errors.Wrap(err, "failed to write zonfig config file")
+ zoneGroupConfig, err := getZoneGroupJSON(objContext)
+ if err != nil {
+ return err
}
- defer os.Remove(configFilename)
-
- args = []string{"zone", "set", zoneArg, "--infile=" + configFilename, realmArg, zoneGroupArg}
- output, err = RunAdminCommandNoMultisite(objContext, false, args...)
+ zoneGroupUpdated, err := adjustZoneGroupPlacementTargets(zoneGroupConfig, zoneUpdated)
if err != nil {
- return errors.Wrap(err, "failed to set zone config")
+ return err
}
- logger.Debugf("Zone set results=%s", output)
+ hasZoneGroupChanged := !reflect.DeepEqual(zoneGroupConfig, zoneGroupUpdated)
- if err = zoneUpdateWorkaround(objContext, output, expectedDataPool); err != nil {
- return errors.Wrap(err, "failed to apply zone set workaround")
+ // persist configuration updates:
+ if hasZoneChanged {
+ logger.Infof("zone config changed: performing zone config updates for %s", objContext.Zone)
+ updatedZoneResult, err := updateZoneJSON(objContext, zoneUpdated)
+ if err != nil {
+ return fmt.Errorf("unable to persist zone config update for %s: %w", objContext.Zone, err)
+ }
+ if err = zoneUpdateWorkaround(objContext, zoneUpdated, updatedZoneResult); err != nil {
+ return fmt.Errorf("failed to apply zone set workaround: %w", err)
+ }
+ }
+ if hasZoneGroupChanged {
+ logger.Infof("zonegroup config changed: performing zonegroup config updates for %s", objContext.ZoneGroup)
+ _, err = updateZoneGroupJSON(objContext, zoneGroupUpdated)
+ if err != nil {
+ return fmt.Errorf("unable to persist zonegroup config update for %s: %w", objContext.ZoneGroup, err)
+ }
}
- logger.Infof("Successfully configured RADOS namespaces for object store %q", objContext.Name)
return nil
}
@@ -872,139 +846,226 @@ func sharedPoolsExist(objContext *Context, sharedPools cephv1.ObjectSharedPoolsS
if err != nil {
return errors.Wrapf(err, "failed to list pools")
}
- foundMetadataPool := false
- foundDataPool := false
+ existing := make(map[string]struct{}, len(existingPools))
for _, pool := range existingPools {
- if pool.Name == sharedPools.MetadataPoolName {
- foundMetadataPool = true
- }
- if pool.Name == sharedPools.DataPoolName {
- foundDataPool = true
- }
+ existing[pool.Name] = struct{}{}
}
+	// sharedPools.MetadataPoolName, sharedPools.DataPoolName, and each placement's DataNonECPoolName are optional.
+ // ignore optional pools with empty name:
+ existing[""] = struct{}{}
- if !foundMetadataPool && !foundDataPool {
- return fmt.Errorf("pools do not exist: %q and %q", sharedPools.MetadataPoolName, sharedPools.DataPoolName)
+ if _, ok := existing[sharedPools.MetadataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: %s", sharedPools.MetadataPoolName)
}
- if !foundMetadataPool {
- return fmt.Errorf("metadata pool does not exist: %q", sharedPools.MetadataPoolName)
+ if _, ok := existing[sharedPools.DataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: %s", sharedPools.DataPoolName)
}
- if !foundDataPool {
- return fmt.Errorf("data pool does not exist: %q", sharedPools.DataPoolName)
+
+ for _, pp := range sharedPools.PoolPlacements {
+ if _, ok := existing[pp.MetadataPoolName]; !ok {
+ return fmt.Errorf("sharedPool does not exist: pool %s for placement %s", pp.MetadataPoolName, pp.Name)
+ }
+ if _, ok := existing[pp.DataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: pool %s for placement %s", pp.DataPoolName, pp.Name)
+ }
+ if _, ok := existing[pp.DataNonECPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: pool %s for placement %s", pp.DataNonECPoolName, pp.Name)
+ }
+ for _, sc := range pp.StorageClasses {
+ if _, ok := existing[sc.DataPoolName]; !ok {
+ return fmt.Errorf("sharedPool do not exist: pool %s for StorageClass %s", sc.DataPoolName, sc.Name)
+ }
+ }
}
- logger.Info("verified shared pools exist")
return nil
}
-func applyExpectedRadosNamespaceSettings(zoneConfig map[string]interface{}, metadataPrefix, dataPrefix, dataPool string) error {
- // Update the necessary fields for RAODS namespaces
- zoneConfig["domain_root"] = metadataPrefix + "meta.root"
- zoneConfig["control_pool"] = metadataPrefix + "control"
- zoneConfig["gc_pool"] = metadataPrefix + "log.gc"
- zoneConfig["lc_pool"] = metadataPrefix + "log.lc"
- zoneConfig["log_pool"] = metadataPrefix + "log"
- zoneConfig["intent_log_pool"] = metadataPrefix + "log.intent"
- zoneConfig["usage_log_pool"] = metadataPrefix + "log.usage"
- zoneConfig["roles_pool"] = metadataPrefix + "meta.roles"
- zoneConfig["reshard_pool"] = metadataPrefix + "log.reshard"
- zoneConfig["user_keys_pool"] = metadataPrefix + "meta.users.keys"
- zoneConfig["user_email_pool"] = metadataPrefix + "meta.users.email"
- zoneConfig["user_swift_pool"] = metadataPrefix + "meta.users.swift"
- zoneConfig["user_uid_pool"] = metadataPrefix + "meta.users.uid"
- zoneConfig["otp_pool"] = metadataPrefix + "otp"
- zoneConfig["notif_pool"] = metadataPrefix + "log.notif"
-
- placementPools, ok := zoneConfig["placement_pools"].([]interface{})
- if !ok {
- return fmt.Errorf("failed to parse placement_pools")
+func adjustZoneDefaultPools(zone map[string]interface{}, spec cephv1.ObjectSharedPoolsSpec) (map[string]interface{}, error) {
+ name, err := getObjProperty[string](zone, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zone name: %w", err)
}
- if len(placementPools) == 0 {
- return fmt.Errorf("no placement pools")
+
+ zone, err = deepCopyJson(zone)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deep copy zone %s: %w", name, err)
+ }
+
+ defaultMetaPool := getDefaultMetadataPool(spec)
+ if defaultMetaPool == "" {
+		// default pool is not present in the shared pool spec
+ return zone, nil
+ }
+	// add the zone namespace to the metadata pool so it can be shared safely across rgw instances or zones.
+	// in the non-multisite case, the zone name equals the rgw instance name.
+ defaultMetaPool = defaultMetaPool + ":" + name
+ zonePoolNSSuffix := map[string]string{
+ "domain_root": ".meta.root",
+ "control_pool": ".control",
+ "gc_pool": ".log.gc",
+ "lc_pool": ".log.lc",
+ "log_pool": ".log",
+ "intent_log_pool": ".log.intent",
+ "usage_log_pool": ".log.usage",
+ "roles_pool": ".meta.roles",
+ "reshard_pool": ".log.reshard",
+ "user_keys_pool": ".meta.users.keys",
+ "user_email_pool": ".meta.users.email",
+ "user_swift_pool": ".meta.users.swift",
+ "user_uid_pool": ".meta.users.uid",
+ "otp_pool": ".otp",
+ "notif_pool": ".log.notif",
+ }
+ for pool, nsSuffix := range zonePoolNSSuffix {
+ // replace rgw internal index pools with namespaced metadata pool
+ namespacedPool := defaultMetaPool + nsSuffix
+ prev, err := setObjProperty(zone, namespacedPool, pool)
+ if err != nil {
+ return nil, fmt.Errorf("unable to set pool %s for zone %s: %w", pool, name, err)
+ }
+ if namespacedPool != prev {
+ logger.Debugf("update shared pool %s for zone %s: %s -> %s", pool, name, prev, namespacedPool)
+ }
}
- placementPool, ok := placementPools[0].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse placement_pools[0]")
+ return zone, nil
+}
+
+// There was a radosgw-admin bug that was preventing the RADOS namespace from being applied
+// for the data pool. The fix is included in Reef v18.2.3 or newer, and v19.2.0.
+// The workaround is to run a "radosgw-admin zone placement modify" command to apply
+// the desired data pool config.
+// After Reef (v18) support is removed, this method will be dead code.
+func zoneUpdateWorkaround(objContext *Context, expectedZone, gotZone map[string]interface{}) error {
+	// Update the necessary fields for RADOS namespaces
+ // If the radosgw-admin fix is in the release, the data pool is already applied and we skip the workaround.
+ expected, err := getObjProperty[[]interface{}](expectedZone, "placement_pools")
+ if err != nil {
+ return err
}
- placementVals, ok := placementPool["val"].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse placement_pools[0].val")
+ got, err := getObjProperty[[]interface{}](gotZone, "placement_pools")
+ if err != nil {
+ return err
}
- placementVals["index_pool"] = metadataPrefix + "buckets.index"
- // The extra pool is for omap data for multi-part uploads, so we use
- // the metadata pool instead of the data pool.
- placementVals["data_extra_pool"] = metadataPrefix + "buckets.non-ec"
- storageClasses, ok := placementVals["storage_classes"].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse storage_classes")
+ if len(expected) != len(got) {
+ // should not happen
+ return fmt.Errorf("placements were not applied to zone config: expected %+v, got %+v", expected, got)
}
- stdStorageClass, ok := storageClasses["STANDARD"].(map[string]interface{})
- if !ok {
- return fmt.Errorf("failed to parse storage_classes.STANDARD")
+
+ // update pool placements one-by-one if needed
+ for i, expPl := range expected {
+ expPoolObj, ok := expPl.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unable to cast pool placement to object: %+v", expPl)
+ }
+ expPoolName, err := getObjProperty[string](expPoolObj, "key")
+ if err != nil {
+ return fmt.Errorf("unable to get pool placement name: %w", err)
+ }
+
+ gotPoolObj, ok := got[i].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unable to cast pool placement to object: %+v", got[i])
+ }
+ gotPoolName, err := getObjProperty[string](gotPoolObj, "key")
+ if err != nil {
+ return fmt.Errorf("unable to get pool placement name: %w", err)
+ }
+
+ if expPoolName != gotPoolName {
+ // should not happen
+ return fmt.Errorf("placements were not applied to zone config: expected %+v, got %+v", expected, got)
+ }
+ err = zoneUpdatePlacementWorkaround(objContext, gotPoolName, expPoolObj, gotPoolObj)
+ if err != nil {
+ return fmt.Errorf("unable to do zone update workaround for placement %q: %w", gotPoolName, err)
+ }
}
- stdStorageClass["data_pool"] = dataPool
return nil
}
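+
+// For illustration only: the placement workaround below effectively re-issues a
+// command of the following form (all values are example placeholders):
+//
+//	radosgw-admin zone placement modify --rgw-realm=<realm> --rgw-zonegroup=<zonegroup> \
+//	  --rgw-zone=<zone> --placement-id default-placement --storage-class STANDARD \
+//	  --data-pool rgw-data-pool:store-a.buckets.data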
-func dataPoolIsExpected(objContext *Context, zoneConfig map[string]interface{}, expectedDataPool string) bool {
- placementPools, ok := zoneConfig["placement_pools"].([]interface{})
- if !ok {
- return false
+func zoneUpdatePlacementWorkaround(objContext *Context, placementID string, expect, got map[string]interface{}) error {
+ args := []string{
+ "zone", "placement", "modify",
+ "--rgw-realm=" + objContext.Realm,
+ "--rgw-zonegroup=" + objContext.ZoneGroup,
+ "--rgw-zone=" + objContext.Zone,
+ "--placement-id", placementID,
}
- placementPool, ok := placementPools[0].(map[string]interface{})
- if !ok {
- return false
+ // check index and data pools
+ needsWorkaround := false
+ expPool, err := getObjProperty[string](expect, "val", "index_pool")
+ if err != nil {
+ return err
}
- placementVals, ok := placementPool["val"].(map[string]interface{})
- if !ok {
- return false
+ gotPool, err := getObjProperty[string](got, "val", "index_pool")
+ if err != nil {
+ return err
}
- storageClasses, ok := placementVals["storage_classes"].(map[string]interface{})
- if !ok {
- return false
+ if expPool != gotPool {
+ logger.Infof("do zone update workaround for zone %s, placement %s index pool: %s -> %s", objContext.Zone, placementID, gotPool, expPool)
+ args = append(args, "--index-pool="+expPool)
+ needsWorkaround = true
}
- stdStorageClass, ok := storageClasses["STANDARD"].(map[string]interface{})
- if !ok {
- return false
+ expPool, err = getObjProperty[string](expect, "val", "data_extra_pool")
+ if err != nil {
+ return err
}
- logger.Infof("data pool is currently set to %q", stdStorageClass["data_pool"])
- return stdStorageClass["data_pool"] == expectedDataPool
-}
-
-// There was a radosgw-admin bug that was preventing the RADOS namespace from being applied
-// for the data pool. The fix is included in Reef v18.2.3 or newer, and v19.2.0.
-// The workaround is to run a "radosgw-admin zone placement modify" command to apply
-// the desired data pool config.
-// After Reef (v18) support is removed, this method will be dead code.
-func zoneUpdateWorkaround(objContext *Context, zoneOutput, expectedDataPool string) error {
- var zoneConfig map[string]interface{}
- err := json.Unmarshal([]byte(zoneOutput), &zoneConfig)
+ gotPool, err = getObjProperty[string](got, "val", "data_extra_pool")
if err != nil {
- return errors.Wrap(err, "failed to unmarshal zone")
+ return err
}
- // Update the necessary fields for RAODS namespaces
- // If the radosgw-admin fix is in the release, the data pool is already applied and we skip the workaround.
- if dataPoolIsExpected(objContext, zoneConfig, expectedDataPool) {
- logger.Infof("data pool was already set as expected to %q, workaround not needed", expectedDataPool)
- return nil
+ if expPool != gotPool {
+ logger.Infof("do zone update workaround for zone %s, placement %s data extra pool: %s -> %s", objContext.Zone, placementID, gotPool, expPool)
+ args = append(args, "--data-extra-pool="+expPool)
+ needsWorkaround = true
}
- logger.Infof("Setting data pool to %q", expectedDataPool)
- args := []string{"zone", "placement", "modify",
- "--rgw-realm=" + objContext.Realm,
- "--rgw-zonegroup=" + objContext.ZoneGroup,
- "--rgw-zone=" + objContext.Name,
- "--placement-id", "default-placement",
- "--storage-class", "STANDARD",
- "--data-pool=" + expectedDataPool,
+ if needsWorkaround {
+ _, err = RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return errors.Wrap(err, "failed to set zone config")
+ }
}
-
- output, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ expSC, err := getObjProperty[map[string]interface{}](expect, "val", "storage_classes")
if err != nil {
- return errors.Wrap(err, "failed to set zone config")
+ return err
+ }
+ gotSC, err := getObjProperty[map[string]interface{}](got, "val", "storage_classes")
+ if err != nil {
+ return err
}
- logger.Debugf("zone placement modify output=%s", output)
- logger.Info("zone placement for the data pool was applied successfully")
+
+ // check storage classes data pools
+ for sc := range expSC {
+ expDP, err := getObjProperty[string](expSC, sc, "data_pool")
+ if err != nil {
+ return err
+ }
+ gotDP, err := getObjProperty[string](gotSC, sc, "data_pool")
+ if err != nil {
+ return err
+ }
+ if expDP == gotDP {
+ continue
+ }
+ logger.Infof("do zone update workaround for zone %s, placement %s storage-class %s pool: %s -> %s", objContext.Zone, placementID, sc, gotDP, expDP)
+ args = []string{
+ "zone", "placement", "modify",
+ "--rgw-realm=" + objContext.Realm,
+ "--rgw-zonegroup=" + objContext.ZoneGroup,
+ "--rgw-zone=" + objContext.Zone,
+ "--placement-id", placementID,
+ "--storage-class", sc,
+ "--data-pool=" + expDP,
+ }
+ output, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return errors.Wrap(err, "failed to set zone config")
+ }
+ logger.Debugf("zone placement modify output=%s", output)
+ }
+
return nil
}
@@ -1333,3 +1394,17 @@ func CheckIfZonePresentInZoneGroup(objContext *Context) (bool, error) {
}
return false, nil
}
+
+// ValidateObjectStorePoolsConfig returns error if given ObjectStore pool configuration is inconsistent.
+func ValidateObjectStorePoolsConfig(metadataPool, dataPool cephv1.PoolSpec, sharedPools cephv1.ObjectSharedPoolsSpec) error {
+ if err := validatePoolPlacements(sharedPools.PoolPlacements); err != nil {
+ return err
+ }
+ if !EmptyPool(dataPool) && sharedPools.DataPoolName != "" {
+ return fmt.Errorf("invalidObjStorePoolCofig: object store dataPool and sharedPools.dataPool=%s are mutually exclusive. Only one of them can be set.", sharedPools.DataPoolName)
+ }
+ if !EmptyPool(metadataPool) && sharedPools.MetadataPoolName != "" {
+ return fmt.Errorf("invalidObjStorePoolCofig: object store metadataPool and sharedPools.metadataPool=%s are mutually exclusive. Only one of them can be set.", sharedPools.MetadataPoolName)
+ }
+ return nil
+}
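+
+// For example (illustrative values), ValidateObjectStorePoolsConfig rejects the
+// following combination, because a dedicated data pool and a shared data pool
+// are mutually exclusive:
+//
+//	dataPool:    cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 3}}
+//	sharedPools: cephv1.ObjectSharedPoolsSpec{DataPoolName: "rgw-data-pool"}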
diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go
index 33d33059629c..7c0d16c851c4 100644
--- a/pkg/operator/ceph/object/objectstore_test.go
+++ b/pkg/operator/ceph/object/objectstore_test.go
@@ -1,5 +1,4 @@
-/*
-Copyright 2016 The Rook Authors. All rights reserved.
+/* Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,8 +17,8 @@ package object
import (
"context"
- "encoding/json"
"fmt"
+ "os"
"strings"
"syscall"
"testing"
@@ -112,6 +111,85 @@ const (
"realm_id": "e7f176c6-d207-459c-aa04-c3334300ddc6",
"notif_pool": "rgw-meta-pool:store-a.log.notif"
}`
+ objectZoneSharedPoolsJsonTempl = `{
+ "id": "c1a20ed9-6370-4abd-b78c-bdf0da2a8dbb",
+ "name": "store-a",
+ "domain_root": "%[1]s:store-a.meta.root",
+ "control_pool": "%[1]s:store-a.control",
+ "gc_pool": "%[1]s:store-a.log.gc",
+ "lc_pool": "%[1]s:store-a.log.lc",
+ "log_pool": "%[1]s:store-a.log",
+ "intent_log_pool": "%[1]s:store-a.log.intent",
+ "usage_log_pool": "%[1]s:store-a.log.usage",
+ "roles_pool": "%[1]s:store-a.meta.roles",
+ "reshard_pool": "%[1]s:store-a.log.reshard",
+ "user_keys_pool": "%[1]s:store-a.meta.users.keys",
+ "user_email_pool": "%[1]s:store-a.meta.users.email",
+ "user_swift_pool": "%[1]s:store-a.meta.users.swift",
+ "user_uid_pool": "%[1]s:store-a.meta.users.uid",
+ "otp_pool": "%[1]s:store-a.otp",
+ "system_key": {
+ "access_key": "",
+ "secret_key": ""
+ },
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "data_extra_pool": "%[1]s:store-a.buckets.non-ec",
+ "index_pool": "%[1]s:store-a.buckets.index",
+ "index_type": 0,
+ "inline_data": true,
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "%[2]s:store-a.buckets.data"
+ }
+ }
+ }
+ }
+ ],
+ "realm_id": "e7f176c6-d207-459c-aa04-c3334300ddc6",
+ "notif_pool": "%[1]s:store-a.log.notif"
+}`
+
+ objectZonegroupJson = `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "store-a",
+ "api_name": "test",
+ "is_master": true,
+ "endpoints": [
+ "https://rook-ceph-rgw-test.rook-ceph.svc:443"
+ ],
+ "hostnames": [],
+ "hostnames_s3website": [],
+ "master_zone": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "zones": [
+ {
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "endpoints": [
+ "https://rook-ceph-rgw-test.rook-ceph.svc:443"
+ ]
+ }
+ ],
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd",
+ "sync_policy": {
+ "groups": []
+ },
+ "enabled_features": [
+ "resharding"
+ ]
+}`
//#nosec G101 -- The credentials are just for the unit tests
access_key = "VFKF8SSU9L3L2UR03Z8C"
@@ -149,183 +227,68 @@ func TestReconcileRealm(t *testing.T) {
assert.Nil(t, err)
}
-func TestApplyExpectedRadosNamespaceSettings(t *testing.T) {
- dataPoolName := "testdatapool"
- metaPrefix := "testmeta"
- dataPrefix := "testdata"
- var zoneConfig map[string]interface{}
-
- t.Run("fail when input empty", func(t *testing.T) {
- input := map[string]interface{}{}
- err := applyExpectedRadosNamespaceSettings(input, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), "placement_pools"))
- })
- t.Run("valid input", func(t *testing.T) {
- assert.NoError(t, json.Unmarshal([]byte(objectZoneJson), &zoneConfig))
- assert.NoError(t, applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName))
- // validate a sampling of the updated fields
- assert.Equal(t, metaPrefix+"log.notif", zoneConfig["notif_pool"])
- placementPools := zoneConfig["placement_pools"].([]interface{})
- placementPool := placementPools[0].(map[string]interface{})
- placementVals := placementPool["val"].(map[string]interface{})
- storageClasses := placementVals["storage_classes"].(map[string]interface{})
- stdStorageClass := storageClasses["STANDARD"].(map[string]interface{})
- assert.Equal(t, dataPoolName, stdStorageClass["data_pool"])
- })
- t.Run("placement pools empty", func(t *testing.T) {
- // remove expected sections of the json and confirm that it returns an error without throwing an exception
- emptyPlacementPoolsJson := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": []
- }`
- assert.NoError(t, json.Unmarshal([]byte(emptyPlacementPoolsJson), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), "no placement pools"))
- })
- t.Run("placement pool value missing", func(t *testing.T) {
- missingPoolValueJson := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement"
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(missingPoolValueJson), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "placement_pools[0].val")
- })
- t.Run("storage classes missing", func(t *testing.T) {
- storageClassesMissing := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement",
- "val": {
- "index_pool": "rgw-meta-pool:store-a.buckets.index"
- }
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(storageClassesMissing), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "storage_classes")
- })
- t.Run("standard storage class missing", func(t *testing.T) {
- standardSCMissing := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement",
- "val": {
- "index_pool": "rgw-meta-pool:store-a.buckets.index",
- "storage_classes": {
- "BAD": {
- "data_pool": "rgw-data-pool:store-a.buckets.data"
- }
- }
- }
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(standardSCMissing), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "storage_classes.STANDARD")
- })
- t.Run("no config missing", func(t *testing.T) {
- nothingMissing := `{
- "otp_pool": "rgw-meta-pool:store-a.otp",
- "placement_pools": [
- {
- "key": "default-placement",
- "val": {
- "index_pool": "rgw-meta-pool:store-a.buckets.index",
- "storage_classes": {
- "STANDARD": {
- "data_pool": "rgw-data-pool:store-a.buckets.data"
- }
- }
- }
- }
- ]
- }`
- assert.NoError(t, json.Unmarshal([]byte(nothingMissing), &zoneConfig))
- err := applyExpectedRadosNamespaceSettings(zoneConfig, metaPrefix, dataPrefix, dataPoolName)
- assert.NoError(t, err)
- })
-}
-
-func TestSharedPoolsExist(t *testing.T) {
- executor := &exectest.MockExecutor{}
- poolJson := ""
- mockExecutorFuncOutput := func(command string, args ...string) (string, error) {
- logger.Infof("Command: %s %v", command, args)
- if args[0] == "osd" && args[1] == "lspools" {
- return poolJson, nil
- }
- return "", errors.Errorf("unexpected ceph command %q", args)
- }
- executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
- return mockExecutorFuncOutput(command, args...)
- }
- context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: client.AdminTestClusterInfo("mycluster")}
- sharedPools := cephv1.ObjectSharedPoolsSpec{
- MetadataPoolName: "metapool",
- DataPoolName: "datapool",
- }
- poolJson = `[{"poolnum":1,"poolname":".mgr"},{"poolnum":13,"poolname":".rgw.root"},
- {"poolnum":14,"poolname":"rgw-meta-pool"},{"poolnum":15,"poolname":"rgw-data-pool"}]`
- err := sharedPoolsExist(context, sharedPools)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "pools do not exist")
-
- sharedPools.MetadataPoolName = "rgw-meta-pool"
- err = sharedPoolsExist(context, sharedPools)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "data pool does not exist")
-
- sharedPools.DataPoolName = "rgw-data-pool"
- sharedPools.MetadataPoolName = "bad-pool"
- err = sharedPoolsExist(context, sharedPools)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "metadata pool does not exist")
-
- sharedPools.MetadataPoolName = "rgw-meta-pool"
- err = sharedPoolsExist(context, sharedPools)
- assert.NoError(t, err)
-}
-
func TestConfigureStoreWithSharedPools(t *testing.T) {
- dataPoolAlreadySet := "datapool:store-a.buckets.data"
+ sharedMetaPoolAlreadySet, sharedDataPoolAlreadySet := "", ""
zoneGetCalled := false
zoneSetCalled := false
+ zoneGroupGetCalled := false
+ zoneGroupSetCalled := false
placementModifyCalled := false
mockExecutorFuncOutput := func(command string, args ...string) (string, error) {
logger.Infof("Command: %s %v", command, args)
if args[0] == "osd" && args[1] == "lspools" {
- return `[{"poolnum":14,"poolname":"test-meta"},{"poolnum":15,"poolname":"test-data"}]`, nil
+ return `[{"poolnum":14,"poolname":"test-meta"},{"poolnum":15,"poolname":"test-data"},{"poolnum":16,"poolname":"fast-meta"},{"poolnum":17,"poolname":"fast-data"}]`, nil
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
+
executorFuncTimeout := func(timeout time.Duration, command string, args ...string) (string, error) {
logger.Infof("CommandTimeout: %s %v", command, args)
if args[0] == "zone" {
if args[1] == "get" {
zoneGetCalled = true
- replaceDataPool := "rgw-data-pool:store-a.buckets.data"
- return strings.Replace(objectZoneJson, replaceDataPool, dataPoolAlreadySet, -1), nil
+ if sharedDataPoolAlreadySet == "" && sharedMetaPoolAlreadySet == "" {
+ replaceDataPool := "rgw-data-pool:store-a.buckets.data"
+ return strings.Replace(objectZoneJson, replaceDataPool, "datapool:store-a.buckets.data", -1), nil
+ }
+ return fmt.Sprintf(objectZoneSharedPoolsJsonTempl, sharedMetaPoolAlreadySet, sharedDataPoolAlreadySet), nil
} else if args[1] == "set" {
zoneSetCalled = true
+ for _, arg := range args {
+ if !strings.HasPrefix(arg, "--infile=") {
+ continue
+ }
+ file := strings.TrimPrefix(arg, "--infile=")
+ inBytes, err := os.ReadFile(file)
+ if err != nil {
+ panic(err)
+ }
+ return string(inBytes), nil
+ }
return objectZoneJson, nil
} else if args[1] == "placement" && args[2] == "modify" {
placementModifyCalled = true
return objectZoneJson, nil
}
+ } else if args[0] == "zonegroup" {
+ if args[1] == "get" {
+ zoneGroupGetCalled = true
+ return objectZonegroupJson, nil
+ } else if args[1] == "set" {
+ zoneGroupSetCalled = true
+ for _, arg := range args {
+ if !strings.HasPrefix(arg, "--infile=") {
+ continue
+ }
+ file := strings.TrimPrefix(arg, "--infile=")
+ inBytes, err := os.ReadFile(file)
+ if err != nil {
+ panic(err)
+ }
+ return string(inBytes), nil
+ }
+ return objectZonegroupJson, nil
+ }
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
@@ -351,6 +314,8 @@ func TestConfigureStoreWithSharedPools(t *testing.T) {
assert.False(t, zoneGetCalled)
assert.False(t, zoneSetCalled)
assert.False(t, placementModifyCalled)
+ assert.False(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled)
})
t.Run("configure the zone", func(t *testing.T) {
sharedPools := cephv1.ObjectSharedPoolsSpec{
@@ -361,7 +326,27 @@ func TestConfigureStoreWithSharedPools(t *testing.T) {
assert.NoError(t, err)
assert.True(t, zoneGetCalled)
assert.True(t, zoneSetCalled)
- assert.True(t, placementModifyCalled)
+	assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
+ assert.True(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled) // zone group is set only if extra pool placements specified
+ })
+ t.Run("configure with default placement", func(t *testing.T) {
+ sharedPools := cephv1.ObjectSharedPoolsSpec{
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "test-meta",
+ DataPoolName: "test-data",
+ },
+ },
+ }
+ err := ConfigureSharedPoolsForZone(context, sharedPools)
+ assert.NoError(t, err)
+ assert.True(t, zoneGetCalled)
+ assert.True(t, zoneSetCalled)
+		assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
+ assert.True(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled) // zone group is set only if extra pool placements specified
})
t.Run("data pool already set", func(t *testing.T) {
// Simulate that the data pool has already been set and the zone update can be skipped
@@ -369,15 +354,40 @@ func TestConfigureStoreWithSharedPools(t *testing.T) {
MetadataPoolName: "test-meta",
DataPoolName: "test-data",
}
- dataPoolAlreadySet = fmt.Sprintf("%s:%s.buckets.data", sharedPools.DataPoolName, context.Zone)
+ sharedMetaPoolAlreadySet, sharedDataPoolAlreadySet = "test-meta", "test-data"
zoneGetCalled = false
zoneSetCalled = false
placementModifyCalled = false
err := ConfigureSharedPoolsForZone(context, sharedPools)
assert.True(t, zoneGetCalled)
assert.False(t, zoneSetCalled)
- assert.False(t, placementModifyCalled)
+		assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
assert.NoError(t, err)
+ assert.True(t, zoneGroupGetCalled)
+ assert.False(t, zoneGroupSetCalled)
+ })
+ t.Run("configure with extra placement", func(t *testing.T) {
+ sharedPools := cephv1.ObjectSharedPoolsSpec{
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "test-meta",
+ DataPoolName: "test-data",
+ },
+ {
+ Name: "fast",
+ MetadataPoolName: "fast-meta",
+ DataPoolName: "fast-data",
+ },
+ },
+ }
+ err := ConfigureSharedPoolsForZone(context, sharedPools)
+ assert.NoError(t, err)
+ assert.True(t, zoneGetCalled)
+ assert.True(t, zoneSetCalled)
+		assert.False(t, placementModifyCalled) // mock returns applied namespaces, no workaround needed
+ assert.True(t, zoneGroupGetCalled)
+ assert.True(t, zoneGroupSetCalled)
})
}
@@ -1482,3 +1492,486 @@ func TestListsAreEqual(t *testing.T) {
})
}
}
+
+func TestValidateObjectStorePoolsConfig(t *testing.T) {
+ type args struct {
+ metadataPool cephv1.PoolSpec
+ dataPool cephv1.PoolSpec
+ sharedPools cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "valid: nothing is set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only metadata pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only data pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only metadata and data pools set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only shared metadata pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only shared data pool set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: only shared data and metaData pools set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: shared meta and non-shared data",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "valid: shared data and non-shared meta",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "invalid: shared and non-shared meta set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ dataPool: cephv1.PoolSpec{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "test",
+ DataPoolName: "",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid: shared and non-shared data set",
+ args: args{
+ metadataPool: cephv1.PoolSpec{},
+ dataPool: cephv1.PoolSpec{
+ FailureDomain: "host",
+ Replicated: cephv1.ReplicatedSpec{Size: 3},
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "test",
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid: placements invalid",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "same_name",
+ MetadataPoolName: "",
+ DataPoolName: "",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: "same_name",
+ MetadataPoolName: "",
+ DataPoolName: "",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := ValidateObjectStorePoolsConfig(tt.args.metadataPool, tt.args.dataPool, tt.args.sharedPools); (err != nil) != tt.wantErr {
+ t.Errorf("ValidateObjectStorePoolsConfig() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_sharedPoolsExist(t *testing.T) {
+ type args struct {
+ existsInCluster []string
+ sharedPools cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "all pool exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "meta pool not exists",
+ args: args{
+ existsInCluster: []string{
+ // "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "data pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ // "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement meta pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ // "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement data pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ // "placement-data",
+ "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement data non ec pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ // "placement-data-non-ec",
+ "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "placement storage class pool not exists",
+ args: args{
+ existsInCluster: []string{
+ "meta",
+ "data",
+ "placement-meta",
+ "placement-data",
+ "placement-data-non-ec",
+ // "placement-sc-data",
+ },
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "placement-meta",
+ DataPoolName: "placement-data",
+ DataNonECPoolName: "placement-data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "placement-sc-data",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "empty pool names ignored",
+ args: args{
+ existsInCluster: []string{},
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "",
+ DataPoolName: "",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "sc",
+ DataPoolName: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ executor := &exectest.MockExecutor{}
+ mockExecutorFuncOutput := func(command string, args ...string) (string, error) {
+ if args[0] == "osd" && args[1] == "lspools" {
+ pools := make([]string, len(tt.args.existsInCluster))
+ for i, p := range tt.args.existsInCluster {
+ pools[i] = fmt.Sprintf(`{"poolnum":%d,"poolname":%q}`, i+1, p)
+ }
+ poolJson := fmt.Sprintf(`[%s]`, strings.Join(pools, ","))
+ return poolJson, nil
+ }
+ return "", errors.Errorf("unexpected ceph command %q", args)
+ }
+ executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
+ return mockExecutorFuncOutput(command, args...)
+ }
+ context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: client.AdminTestClusterInfo("mycluster")}
+
+ if err := sharedPoolsExist(context, tt.args.sharedPools); (err != nil) != tt.wantErr {
+ t.Errorf("sharedPoolsExist() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/pkg/operator/ceph/object/shared_pools.go b/pkg/operator/ceph/object/shared_pools.go
new file mode 100644
index 000000000000..0ab62b8d71d1
--- /dev/null
+++ b/pkg/operator/ceph/object/shared_pools.go
@@ -0,0 +1,510 @@
+package object
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+ "sort"
+
+ "github.com/pkg/errors"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+const (
+ defaultPlacementName = "default"
+ defaultPlacementCephConfigName = "default-placement"
+ defaultPlacementStorageClass = "STANDARD"
+)
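+
+// Note: the CRD-facing placement name "default" (defaultPlacementName) is mapped
+// to Ceph's reserved placement id "default-placement" (defaultPlacementCephConfigName)
+// when the zone and zonegroup configs are generated below.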
+
+func IsNeedToCreateObjectStorePools(sharedPools cephv1.ObjectSharedPoolsSpec) bool {
+ for _, pp := range sharedPools.PoolPlacements {
+ if pp.Name == defaultPlacementName {
+ // No need to create pools. External pools from default placement will be used
+ return false
+ }
+ }
+ if sharedPools.MetadataPoolName != "" && sharedPools.DataPoolName != "" {
+ // No need to create pools. Shared pools will be used
+ return false
+ }
+ return true
+}
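+
+// For example (illustrative): IsNeedToCreateObjectStorePools returns false for
+//
+//	cephv1.ObjectSharedPoolsSpec{MetadataPoolName: "rgw-meta", DataPoolName: "rgw-data"}
+//
+// and for any spec that contains a PoolPlacement named "default", because the
+// object store can then rely entirely on the pre-existing shared pools.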
+
+func validatePoolPlacements(placements []cephv1.PoolPlacementSpec) error {
+ names := make(map[string]struct{}, len(placements))
+ for _, p := range placements {
+ if _, ok := names[p.Name]; ok {
+ return fmt.Errorf("invalidObjStorePoolCofig: invalid placement %s: placement names must be unique", p.Name)
+ }
+ names[p.Name] = struct{}{}
+ if err := validatePoolPlacementStorageClasses(p.StorageClasses); err != nil {
+ return fmt.Errorf("invalidObjStorePoolCofig: invalid placement %s: %w", p.Name, err)
+ }
+ }
+ return nil
+}
+
+func validatePoolPlacementStorageClasses(scList []cephv1.PlacementStorageClassSpec) error {
+ names := make(map[string]struct{}, len(scList))
+ for _, sc := range scList {
+ if sc.Name == defaultPlacementStorageClass {
+ return fmt.Errorf("invalid placement StorageClass %q: %q name is reserved", sc.Name, defaultPlacementStorageClass)
+ }
+ if _, ok := names[sc.Name]; ok {
+ return fmt.Errorf("invalid placement StorageClass %q: name must be unique", sc.Name)
+ }
+ names[sc.Name] = struct{}{}
+ }
+ return nil
+}
+
+func adjustZonePlacementPools(zone map[string]interface{}, spec cephv1.ObjectSharedPoolsSpec) (map[string]interface{}, error) {
+ name, err := getObjProperty[string](zone, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zone name: %w", err)
+ }
+
+	// deep copy the source zone
+ zone, err = deepCopyJson(zone)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deep copy config for zone %s: %w", name, err)
+ }
+
+ placements, err := getObjProperty[[]interface{}](zone, "placement_pools")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placements for zone %s: %w", name, err)
+ }
+
+ fromSpec := toZonePlacementPools(spec, name)
+
+ inConfig := map[string]struct{}{}
+ idxToRemove := map[int]struct{}{}
+ for i, p := range placements {
+ pObj, ok := p.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to cast pool placement to object for zone %s: %+v", name, p)
+ }
+ placementID, err := getObjProperty[string](pObj, "key")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placement name for zone %s: %w", name, err)
+ }
+ // check if placement should be removed
+ if _, inSpec := fromSpec[placementID]; !inSpec && placementID != defaultPlacementCephConfigName {
+ // remove placement if it is not in spec, but don't remove default placement
+ idxToRemove[i] = struct{}{}
+ continue
+ }
+ // update placement with values from spec:
+ if pSpec, inSpec := fromSpec[placementID]; inSpec {
+ _, err = setObjProperty(pObj, pSpec.Val.IndexPool, "val", "index_pool")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set index pool to pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ _, err = setObjProperty(pObj, pSpec.Val.DataExtraPool, "val", "data_extra_pool")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set data extra pool to pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ scObj, err := toObj(pSpec.Val.StorageClasses)
+ if err != nil {
+ return nil, fmt.Errorf("unable convert to pool placement %q storage class for zone %q: %w", placementID, name, err)
+ }
+
+ _, err = setObjProperty(pObj, scObj, "val", "storage_classes")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set storage classes to pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ inConfig[placementID] = struct{}{}
+ }
+ }
+ if len(idxToRemove) != 0 {
+		// delete placements from the slice
+ updated := make([]interface{}, 0, len(placements)-len(idxToRemove))
+ for i := range placements {
+ if _, ok := idxToRemove[i]; ok {
+				// remove
+ continue
+ }
+ updated = append(updated, placements[i])
+ }
+ placements = updated
+ }
+
+ // add new placements from spec:
+ for placementID, p := range fromSpec {
+ if _, ok := inConfig[placementID]; ok {
+			// already in config
+ continue
+ }
+ pObj, err := toObj(p)
+ if err != nil {
+ return nil, fmt.Errorf("unable convert pool placement %q for zone %q: %w", placementID, name, err)
+ }
+ placements = append(placements, pObj)
+ }
+
+ _, err = setObjProperty(zone, placements, "placement_pools")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set pool placements for zone %q: %w", name, err)
+ }
+ return zone, nil
+}
+
+func getDefaultMetadataPool(spec cephv1.ObjectSharedPoolsSpec) string {
+ for _, p := range spec.PoolPlacements {
+ if p.Name == defaultPlacementName {
+ return p.MetadataPoolName
+ }
+ }
+ return spec.MetadataPoolName
+}
+
+// toZonePlacementPools converts the pool placement CRD definitions into zone config JSON structures
+func toZonePlacementPools(spec cephv1.ObjectSharedPoolsSpec, ns string) map[string]ZonePlacementPool {
+ hasDefault := false
+ res := make(map[string]ZonePlacementPool, len(spec.PoolPlacements)+1)
+ for _, pp := range spec.PoolPlacements {
+ name := pp.Name
+ if pp.Name == defaultPlacementName {
+ hasDefault = true
+ name = defaultPlacementCephConfigName
+ }
+ res[name] = toZonePlacementPool(pp, ns)
+ }
+ if !hasDefault && spec.DataPoolName != "" && spec.MetadataPoolName != "" {
+ // set shared pools as default if no default placement was provided
+ res[defaultPlacementCephConfigName] = ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ // The extra pool is for omap data for multi-part uploads, so we use
+ // the metadata pool instead of the data pool.
+ DataExtraPool: spec.MetadataPoolName + ":" + ns + ".buckets.non-ec",
+ IndexPool: spec.MetadataPoolName + ":" + ns + ".buckets.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: spec.DataPoolName + ":" + ns + ".buckets.data",
+ },
+ },
+			// Workaround: the radosgw-admin "zone set" command sets an incorrect default value
+			// for the placement inline_data field, so we set the default value (true) explicitly.
+ // See: https://tracker.ceph.com/issues/67933
+ InlineData: true,
+ },
+ }
+ }
+ return res
+}
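+
+// For example (illustrative values): for a zone named "store-a" with
+// MetadataPoolName="rgw-meta", DataPoolName="rgw-data", and no explicit "default"
+// placement, toZonePlacementPools returns a single "default-placement" entry with
+// index_pool "rgw-meta:store-a.buckets.index", data_extra_pool
+// "rgw-meta:store-a.buckets.non-ec", and STANDARD data_pool
+// "rgw-data:store-a.buckets.data".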
+
+func toZonePlacementPool(spec cephv1.PoolPlacementSpec, ns string) ZonePlacementPool {
+ placementNS := ns
+ if spec.Name != defaultPlacementName {
+ placementNS += "." + spec.Name
+ }
+ // The extra pool is for omap data for multi-part uploads, so we use
+ // the metadata pool instead of the data pool.
+ nonECPool := spec.MetadataPoolName + ":" + placementNS + ".data.non-ec"
+ if spec.DataNonECPoolName != "" {
+ nonECPool = spec.DataNonECPoolName + ":" + placementNS + ".data.non-ec"
+ }
+
+ res := ZonePlacementPool{
+ Key: spec.Name,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: nonECPool,
+ IndexPool: spec.MetadataPoolName + ":" + placementNS + ".index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: spec.DataPoolName + ":" + placementNS + ".data",
+ },
+ },
+			// Workaround: the radosgw-admin "zone set" command sets an incorrect default value
+			// for the placement inline_data field, so we set the default value (true) explicitly.
+ // See: https://tracker.ceph.com/issues/67933
+ InlineData: true,
+ },
+ }
+ if res.Key == defaultPlacementName {
+ res.Key = defaultPlacementCephConfigName
+ }
+ for _, v := range spec.StorageClasses {
+ res.Val.StorageClasses[v.Name] = ZonePlacementStorageClass{
+ DataPool: v.DataPoolName + ":" + ns + "." + v.Name,
+ }
+ }
+ return res
+}
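+
+// For example (illustrative): a non-default placement "fast" with
+// MetadataPoolName="fast-meta" and DataPoolName="fast-data" in zone "store-a"
+// yields index_pool "fast-meta:store-a.fast.index", data_extra_pool
+// "fast-meta:store-a.fast.data.non-ec", and STANDARD data_pool
+// "fast-data:store-a.fast.data".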
+
+func adjustZoneGroupPlacementTargets(group, zone map[string]interface{}) (map[string]interface{}, error) {
+ name, err := getObjProperty[string](group, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zonegroup name: %w", err)
+ }
+
+	// deep copy the source group
+ group, err = deepCopyJson(group)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deep copy config for zonegroup %s: %w", name, err)
+ }
+
+ _, err = setObjProperty(group, defaultPlacementCephConfigName, "default_placement")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set default_placement for zonegroup %s: %w", name, err)
+ }
+
+ desiredTargets, err := createPlacementTargetsFromZonePoolPlacements(zone)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create targets from placements for zonegroup %q: %w", name, err)
+ }
+ currentTargets, err := getObjProperty[[]interface{}](group, "placement_targets")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get targets from placements for zonegroup %q: %w", name, err)
+ }
+
+ applied := map[string]struct{}{}
+ idxToRemove := map[int]struct{}{}
+ for i, target := range currentTargets {
+ tObj, ok := target.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to cast placement target to object for zonegroup %q: %+v", name, target)
+ }
+ tName, err := getObjProperty[string](tObj, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get placement target name for zonegroup %q: %w", name, err)
+ }
+ // update target:
+ if desired, ok := desiredTargets[tName]; ok {
+ sc := []interface{}{}
+ ok = castJson(desired.StorageClasses, &sc)
+ if ok {
+ _, err = setObjProperty(tObj, sc, "storage_classes")
+ } else {
+ _, err = setObjProperty(tObj, desired.StorageClasses, "storage_classes")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("unable to set storage classes to pool placement target %q for zonegroup %q: %w", tName, name, err)
+ }
+ applied[tName] = struct{}{}
+ } else {
+ // remove target
+ idxToRemove[i] = struct{}{}
+ continue
+ }
+ }
+ if len(idxToRemove) != 0 {
+		// delete targets from the slice
+ updated := make([]interface{}, 0, len(currentTargets)-len(idxToRemove))
+ for i := range currentTargets {
+ if _, ok := idxToRemove[i]; ok {
+				// remove
+ continue
+ }
+ updated = append(updated, currentTargets[i])
+ }
+ currentTargets = updated
+ }
+
+ // add new targets:
+ for targetName, target := range desiredTargets {
+ if _, ok := applied[targetName]; ok {
+			// already in config
+ continue
+ }
+ tObj, err := toObj(target)
+ if err != nil {
+ return nil, fmt.Errorf("unable convert placement target %q for zonegroup %q: %w", targetName, name, err)
+ }
+ currentTargets = append(currentTargets, tObj)
+ }
+
+ _, err = setObjProperty(group, currentTargets, "placement_targets")
+ if err != nil {
+ return nil, fmt.Errorf("unable to set placement targets for zonegroup %q: %w", name, err)
+ }
+
+ return group, nil
+}
+
+func createPlacementTargetsFromZonePoolPlacements(zone map[string]interface{}) (map[string]ZonegroupPlacementTarget, error) {
+ zoneName, err := getObjProperty[string](zone, "name")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get zone name: %w", err)
+ }
+
+ zonePoolPlacements, err := getObjProperty[[]interface{}](zone, "placement_pools")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placements for zone %q: %w", zoneName, err)
+ }
+
+ res := make(map[string]ZonegroupPlacementTarget, len(zonePoolPlacements))
+ for _, pp := range zonePoolPlacements {
+ ppObj, ok := pp.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to cast zone pool placement to json obj for zone %q: %+v", zoneName, pp)
+ }
+ name, err := getObjProperty[string](ppObj, "key")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placement key for zone %q: %w", zoneName, err)
+ }
+ storClasses, err := getObjProperty[map[string]interface{}](ppObj, "val", "storage_classes")
+ if err != nil {
+ return nil, fmt.Errorf("unable to get pool placement storage classes for zone %q: %w", zoneName, err)
+ }
+ target := ZonegroupPlacementTarget{
+ Name: name,
+ }
+ for sc := range storClasses {
+ target.StorageClasses = append(target.StorageClasses, sc)
+ }
+ sort.Strings(target.StorageClasses)
+ res[name] = target
+ }
+ return res, nil
+}
+
+func getZoneJSON(objContext *Context) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("get zone: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("get zone: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ logger.Debugf("get zone: rgw-realm=%s, rgw-zone=%s", objContext.Realm, objContext.Zone)
+
+ jsonStr, err := RunAdminCommandNoMultisite(objContext, true, "zone", "get", realmArg, zoneArg)
+ if err != nil {
+		// This handles the case where the pod we use to exec the command (acting as a proxy) is not found/ready yet.
+		// The caller can then handle the error gracefully without flooding the operator logs with misleading error messages.
+ if kerrors.IsNotFound(err) {
+ return nil, err
+ }
+ return nil, errors.Wrap(err, "failed to get rgw zone group")
+ }
+ logger.Debugf("get zone success: rgw-realm=%s, rgw-zone=%s, res=%s", objContext.Realm, objContext.Zone, jsonStr)
+ res := map[string]interface{}{}
+ return res, json.Unmarshal([]byte(jsonStr), &res)
+}
+
+func getZoneGroupJSON(objContext *Context) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("get zonegroup: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("get zonegroup: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ if objContext.ZoneGroup == "" {
+ return nil, fmt.Errorf("get zonegroup: object store zonegroup is missing from context")
+ }
+ zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
+
+ logger.Debugf("get zonegroup: rgw-realm=%s, rgw-zone=%s, rgw-zonegroup=%s", objContext.Realm, objContext.Zone, objContext.ZoneGroup)
+ jsonStr, err := RunAdminCommandNoMultisite(objContext, true, "zonegroup", "get", realmArg, zoneGroupArg, zoneArg)
+ if err != nil {
+		// This handles the case where the pod we use to exec the command (acting as a proxy) is not found/ready yet.
+		// The caller can then handle the error gracefully without flooding the operator logs with misleading error messages.
+ if kerrors.IsNotFound(err) {
+ return nil, err
+ }
+ return nil, errors.Wrap(err, "failed to get rgw zone group")
+ }
+ logger.Debugf("get zonegroup success: rgw-realm=%s, rgw-zone=%s, rgw-zonegroup=%s, res=%s", objContext.Realm, objContext.Zone, objContext.ZoneGroup, jsonStr)
+ res := map[string]interface{}{}
+ return res, json.Unmarshal([]byte(jsonStr), &res)
+}
+
+func updateZoneJSON(objContext *Context, zone map[string]interface{}) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("update zone: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("update zone: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ configBytes, err := json.Marshal(zone)
+ if err != nil {
+ return nil, err
+ }
+ configFilename := path.Join(objContext.Context.ConfigDir, objContext.Name+".zonecfg")
+ if err := os.WriteFile(configFilename, configBytes, 0600); err != nil {
+ return nil, errors.Wrap(err, "failed to write zone config file")
+ }
+ defer os.Remove(configFilename)
+
+ args := []string{"zone", "set", zoneArg, "--infile=" + configFilename, realmArg}
+ updatedBytes, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to set zone config")
+ }
+ logger.Debugf("update zone: %s json config updated value from %q to %q", objContext.Zone, string(configBytes), string(updatedBytes))
+ updated := map[string]interface{}{}
+ err = json.Unmarshal([]byte(updatedBytes), &updated)
+ return updated, err
+}
+
+func updateZoneGroupJSON(objContext *Context, group map[string]interface{}) (map[string]interface{}, error) {
+ if objContext.Realm == "" {
+ return nil, fmt.Errorf("update zonegroup: object store realm is missing from context")
+ }
+ realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
+
+ if objContext.Zone == "" {
+ return nil, fmt.Errorf("update zonegroup: object store zone is missing from context")
+ }
+ zoneArg := fmt.Sprintf("--rgw-zone=%s", objContext.Zone)
+
+ if objContext.ZoneGroup == "" {
+ return nil, fmt.Errorf("update zonegroup: object store zonegroup is missing from context")
+ }
+ zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
+
+ configBytes, err := json.Marshal(group)
+ if err != nil {
+ return nil, err
+ }
+ configFilename := path.Join(objContext.Context.ConfigDir, objContext.Name+".zonegroupcfg")
+ if err := os.WriteFile(configFilename, configBytes, 0600); err != nil {
+ return nil, errors.Wrap(err, "failed to write zonegroup config file")
+ }
+ defer os.Remove(configFilename)
+
+ args := []string{"zonegroup", "set", zoneArg, "--infile=" + configFilename, realmArg, zoneGroupArg}
+ updatedBytes, err := RunAdminCommandNoMultisite(objContext, false, args...)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to set zonegroup config")
+ }
+ updated := map[string]interface{}{}
+ err = json.Unmarshal([]byte(updatedBytes), &updated)
+ return updated, err
+}
+
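+// ZonegroupPlacementTarget represents a single entry of the
+// 'placement_targets' list in the RGW zonegroup JSON.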
+type ZonegroupPlacementTarget struct {
+ Name string `json:"name"`
+ StorageClasses []string `json:"storage_classes"`
+}
+
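+// ZonePlacementPool represents a single entry of the 'placement_pools'
+// list in the RGW zone JSON.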
+type ZonePlacementPool struct {
+ Key string `json:"key"`
+ Val ZonePlacementPoolVal `json:"val"`
+}
+
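+// ZonePlacementPoolVal holds the pool mapping of one zone placement:
+// the index and data-extra pools, plus a data pool per storage class.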
+type ZonePlacementPoolVal struct {
+ DataExtraPool string `json:"data_extra_pool"`
+ IndexPool string `json:"index_pool"`
+ StorageClasses map[string]ZonePlacementStorageClass `json:"storage_classes"`
+ InlineData bool `json:"inline_data"`
+}
+
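+// ZonePlacementStorageClass maps a storage class to its backing data pool.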
+type ZonePlacementStorageClass struct {
+ DataPool string `json:"data_pool"`
+}
diff --git a/pkg/operator/ceph/object/shared_pools_test.go b/pkg/operator/ceph/object/shared_pools_test.go
new file mode 100644
index 000000000000..33f3e6ed2484
--- /dev/null
+++ b/pkg/operator/ceph/object/shared_pools_test.go
@@ -0,0 +1,1803 @@
+package object
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_validatePoolPlacements(t *testing.T) {
+ type args struct {
+ placements []cephv1.PoolPlacementSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "valid: names unique",
+ args: args{
+ placements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "name1",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: "name2",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "invalid: duplicate names",
+ args: args{
+ placements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "name",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: "name",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validatePoolPlacements(tt.args.placements); (err != nil) != tt.wantErr {
+ t.Errorf("validatePoolPlacements() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_validatePoolPlacementStorageClasses(t *testing.T) {
+ type args struct {
+ scList []cephv1.PlacementStorageClassSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "valid: unique names",
+ args: args{
+ scList: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "STANDARD_IA",
+ DataPoolName: "", // handled by CRD validation
+ },
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "", // handled by CRD validation
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "invalid: cannot override STANDARD",
+ args: args{
+ scList: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "STANDARD",
+ DataPoolName: "", // handled by CRD validation
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "invalid: duplicate names",
+ args: args{
+ scList: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "STANDARD_IA",
+ DataPoolName: "", // handled by CRD validation
+ },
+ {
+ Name: "STANDARD_IA",
+ DataPoolName: "", // handled by CRD validation
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validatePoolPlacementStorageClasses(tt.args.scList); (err != nil) != tt.wantErr {
+ t.Errorf("validatePoolPlacementStorageClasses() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestIsNeedToCreateObjectStorePools(t *testing.T) {
+ type args struct {
+ sharedPools cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "no need: both shared pools set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: false,
+ },
+ {
+ name: "no need: default placement is set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "default",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: false,
+ },
+ {
+ name: "need: only meta shared pool set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: true,
+ },
+ {
+ name: "need: only data shared pool set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: true,
+ },
+ {
+ name: "need: nothing is set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ want: true,
+ },
+ {
+ name: "need: no default placement is set",
+ args: args{
+ sharedPools: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "fast",
+ MetadataPoolName: "", // handled by CRD validation
+ DataPoolName: "", // handled by CRD validation
+ DataNonECPoolName: "", // handled by CRD validation
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := IsNeedToCreateObjectStorePools(tt.args.sharedPools); got != tt.want {
+ t.Errorf("IsNeedToCreateObjectStorePools() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_getDefaultMetadataPool(t *testing.T) {
+ type args struct {
+ spec cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {
+ name: "default placement is returned",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta2",
+ DataPoolName: "data2",
+ DataNonECPoolName: "data-non-ec2",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "meta2",
+ },
+ {
+ name: "default placement override shared pool",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-shared",
+ DataPoolName: "data-shared",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta2",
+ DataPoolName: "data2",
+ DataNonECPoolName: "data-non-ec2",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "meta2",
+ },
+ {
+ name: "shared pool returned if default placement not set",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-shared",
+ DataPoolName: "data-shared",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "meta-shared",
+ },
+ {
+ name: "no pool returned",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "data-shared",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "some_name",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec1",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ want: "",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := getDefaultMetadataPool(tt.args.spec); got != tt.want {
+ t.Errorf("getDefaultMetadataPool() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_toZonePlacementPool(t *testing.T) {
+ type args struct {
+ spec cephv1.PoolPlacementSpec
+ ns string
+ }
+ tests := []struct {
+ name string
+ args args
+ want ZonePlacementPool
+ }{
+ {
+ name: "map default placement without non-ec to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:ns.data.non-ec",
+ IndexPool: "meta:ns.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map default placement to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "repl",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "repl:ns.data.non-ec",
+ IndexPool: "meta:ns.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map default placement without extra SC to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "repl",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "repl:ns.data.non-ec",
+ IndexPool: "meta:ns.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.data",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map non-default placement without non-ec to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: "placement",
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:ns.placement.data.non-ec",
+ IndexPool: "meta:ns.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ {
+ name: "map non-default placement to config",
+ args: args{
+ spec: cephv1.PoolPlacementSpec{
+ Name: "placement",
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ DataNonECPoolName: "repl",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ ns: "ns",
+ },
+ want: ZonePlacementPool{
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "repl:ns.placement.data.non-ec",
+ IndexPool: "meta:ns.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data:ns.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:ns.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.want, toZonePlacementPool(tt.args.spec, tt.args.ns))
+ })
+ }
+}
+
+func Test_toZonePlacementPools(t *testing.T) {
+ type args struct {
+ spec cephv1.ObjectSharedPoolsSpec
+ ns string
+ }
+ tests := []struct {
+ name string
+ args args
+ want map[string]ZonePlacementPool
+ }{
+ {
+ name: "backward compatible with prev shared pools",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ defaultPlacementCephConfigName: {
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:rgw-instance.buckets.non-ec",
+ IndexPool: "meta:rgw-instance.buckets.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ "STANDARD": {
+ DataPool: "data:rgw-instance.buckets.data",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ {
+ name: "default placement overrides shared pools",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ },
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ defaultPlacementCephConfigName: {
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "data-non-ec:rgw-instance.data.non-ec",
+ IndexPool: "meta1:rgw-instance.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data1:rgw-instance.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:rgw-instance.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ {
+ name: "no default set",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ },
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ "placement": {
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "data-non-ec:rgw-instance.placement.data.non-ec",
+ IndexPool: "meta1:rgw-instance.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data1:rgw-instance.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:rgw-instance.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ {
+ name: "default shared and placement",
+ args: args{
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "placement",
+ MetadataPoolName: "meta1",
+ DataPoolName: "data1",
+ DataNonECPoolName: "data-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ },
+ },
+ },
+ ns: "rgw-instance",
+ },
+ want: map[string]ZonePlacementPool{
+ defaultPlacementCephConfigName: {
+ Key: defaultPlacementCephConfigName,
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "meta:rgw-instance.buckets.non-ec",
+ IndexPool: "meta:rgw-instance.buckets.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ "STANDARD": {
+ DataPool: "data:rgw-instance.buckets.data",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ "placement": {
+ Key: "placement",
+ Val: ZonePlacementPoolVal{
+ DataExtraPool: "data-non-ec:rgw-instance.placement.data.non-ec",
+ IndexPool: "meta1:rgw-instance.placement.index",
+ StorageClasses: map[string]ZonePlacementStorageClass{
+ defaultPlacementStorageClass: {
+ DataPool: "data1:rgw-instance.placement.data",
+ },
+ "REDUCED_REDUNDANCY": {
+ DataPool: "reduced:rgw-instance.REDUCED_REDUNDANCY",
+ },
+ },
+ InlineData: true,
+ },
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.want, toZonePlacementPools(tt.args.spec, tt.args.ns))
+ })
+ }
+}
+
+func Test_adjustZoneDefaultPools(t *testing.T) {
+ type args struct {
+ beforeJSON string
+ spec cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantJSON string
+ wantChanged bool
+ wantErr bool
+ }{
+ {
+ name: "nothing changed if default shared pool not set",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "non-default",
+ MetadataPoolName: "meta",
+ DataPoolName: "data",
+ },
+ },
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "shared pool set",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ PreserveRadosNamespaceDataOnDelete: false,
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "config equals to spec: no changes needed",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{},
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}
+`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "default placement pool overrides shared pool",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "DomainRoot",
+ "control_pool": "ControlPool",
+ "gc_pool": "GcPool",
+ "lc_pool": "LcPool",
+ "log_pool": "LogPool",
+ "intent_log_pool": "IntentLogPool",
+ "usage_log_pool": "UsageLogPool",
+ "roles_pool": "RolesPool",
+ "reshard_pool": "ReshardPool",
+ "user_keys_pool": "UserKeysPool",
+ "user_email_pool": "UserEmailPool",
+ "user_swift_pool": "UserSwiftPool",
+ "user_uid_pool": "UserUIDPool",
+ "otp_pool": "OtpPool",
+ "notif_pool": "NotifPool",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "shared-meta-pool",
+ DataPoolName: "shared-data-pool",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: defaultPlacementName,
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ },
+ },
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "domain_root": "meta-pool:test.meta.root",
+ "control_pool": "meta-pool:test.control",
+ "gc_pool": "meta-pool:test.log.gc",
+ "lc_pool": "meta-pool:test.log.lc",
+ "log_pool": "meta-pool:test.log",
+ "intent_log_pool": "meta-pool:test.log.intent",
+ "usage_log_pool": "meta-pool:test.log.usage",
+ "roles_pool": "meta-pool:test.meta.roles",
+ "reshard_pool": "meta-pool:test.log.reshard",
+ "user_keys_pool": "meta-pool:test.meta.users.keys",
+ "user_email_pool": "meta-pool:test.meta.users.email",
+ "user_swift_pool": "meta-pool:test.meta.users.swift",
+ "user_uid_pool": "meta-pool:test.meta.users.uid",
+ "otp_pool": "meta-pool:test.otp",
+ "notif_pool": "meta-pool:test.log.notif",
+ "system_key": {
+ "access_key": "AccessKey",
+ "secret_key": "SecretKey"
+ },
+ "placement_pools": [],
+ "realm_id": "29e28253-be54-4581-90dd-206020d2fcdd"
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ srcZone := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.beforeJSON), &srcZone)
+ assert.NoError(t, err)
+ changedZone, err := adjustZoneDefaultPools(srcZone, tt.args.spec)
+
+ // check that source was not modified
+ orig := map[string]interface{}{}
+ jErr := json.Unmarshal([]byte(tt.args.beforeJSON), &orig)
+ assert.NoError(t, jErr)
+ assert.EqualValues(t, orig, srcZone, "src was not modified")
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.Equal(t, tt.wantChanged, !reflect.DeepEqual(srcZone, changedZone))
+ bytes, err := json.Marshal(&changedZone)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+ })
+ }
+}
+
+func Test_adjustZonePlacementPools(t *testing.T) {
+ type args struct {
+ beforeJSON string
+ spec cephv1.ObjectSharedPoolsSpec
+ }
+ tests := []struct {
+ name string
+ args args
+ wantJSON string
+ wantChanged bool
+ wantErr bool
+ }{
+ {
+ name: "no changes: shared spec not set",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{},
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "no changes: spec equal to config",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "meta-pool:test.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "data-pool:test.buckets.data"
+ }
+ },
+ "data_extra_pool": "meta-pool:test.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "meta-pool",
+ DataPoolName: "data-pool",
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "meta-pool:test.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "data-pool:test.buckets.data"
+ }
+ },
+ "data_extra_pool": "meta-pool:test.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "default placement is preserved when non-default placement added",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{{
+ Name: "fast",
+ MetadataPoolName: "fast-meta",
+ DataPoolName: "fast-data",
+ DataNonECPoolName: "fast-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{
+ {
+ Name: "REDUCED_REDUNDANCY",
+ DataPoolName: "reduced",
+ },
+ },
+ }},
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "fast",
+ "val": {
+ "index_pool": "fast-meta:test.fast.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "fast-data:test.fast.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "reduced:test.REDUCED_REDUNDANCY"
+ }
+ },
+ "data_extra_pool": "fast-non-ec:test.fast.data.non-ec",
+ "inline_data": true
+ }
+ }
+
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "delete placement",
+ args: args{
+ beforeJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "fast",
+ "val": {
+ "index_pool": "fast-meta:test.fast.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "fast-data:test.fast.data"
+ }
+ },
+ "data_extra_pool": "fast-non-ec:test.fast.data.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "slow-meta:test.slow.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "slow-data:test.slow.data"
+ }
+ },
+ "data_extra_pool": "slow-non-ec:test.slow.data.non-ec",
+ "index_type": 0,
+ "inline_data": false
+ }
+ }
+ ]
+}`,
+ spec: cephv1.ObjectSharedPoolsSpec{
+ MetadataPoolName: "",
+ DataPoolName: "",
+ PreserveRadosNamespaceDataOnDelete: false,
+ PoolPlacements: []cephv1.PoolPlacementSpec{
+ {
+ Name: "slow",
+ MetadataPoolName: "slow-meta",
+ DataPoolName: "slow-data",
+ DataNonECPoolName: "slow-non-ec",
+ StorageClasses: []cephv1.PlacementStorageClassSpec{},
+ },
+ },
+ },
+ },
+ wantJSON: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "slow-meta:test.slow.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "slow-data:test.slow.data"
+ }
+ },
+ "data_extra_pool": "slow-non-ec:test.slow.data.non-ec",
+ "index_type": 0,
+ "inline_data": false
+ }
+ }
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ srcZone := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.beforeJSON), &srcZone)
+ assert.NoError(t, err)
+ changedZone, err := adjustZonePlacementPools(srcZone, tt.args.spec)
+ // check that source zone was not modified:
+ orig := map[string]interface{}{}
+ jErr := json.Unmarshal([]byte(tt.args.beforeJSON), &orig)
+ assert.NoError(t, jErr)
+ assert.EqualValues(t, srcZone, orig, "source obj was not modified")
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ bytes, err := json.Marshal(&changedZone)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantJSON, string(bytes))
+
+ assert.EqualValues(t, tt.wantChanged, !reflect.DeepEqual(srcZone, changedZone))
+ })
+ }
+}
+
+func Test_adjustZoneGroupPlacementTargets(t *testing.T) {
+ type args struct {
+ zone string
+ groupBefore string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantGroup string
+ wantChanged bool
+ wantErr bool
+ }{
+ {
+ name: "nothing changed",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: false,
+ wantErr: false,
+ },
+ {
+ name: "default changed",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "some-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "storage class added",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "REDUCED_REDUNDANCY","STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "placement added",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec"
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec"
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "REDUCED_REDUNDANCY","STANDARD"
+ ]
+ },
+ {
+ "name": "slow",
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ {
+ name: "placement and sc removed",
+ args: args{
+ groupBefore: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "REDUCED_REDUNDANCY","STANDARD"
+ ]
+ },
+ {
+ "name": "slow",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec"
+ }
+ }
+ ]
+}`,
+ },
+ wantGroup: `{
+ "id": "610c9e3d-19e7-40b0-9f88-03319c4bc65a",
+ "name": "test",
+ "placement_targets": [
+ {
+ "name": "default-placement",
+ "tags": [],
+ "storage_classes": [
+ "STANDARD"
+ ]
+ }
+ ],
+ "default_placement": "default-placement",
+ "enabled_features": [
+ "resharding"
+ ]
+}`,
+ wantChanged: true,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ zj := map[string]interface{}{}
+ err := json.Unmarshal([]byte(tt.args.zone), &zj)
+ assert.NoError(t, err)
+ srcGroup := map[string]interface{}{}
+ err = json.Unmarshal([]byte(tt.args.groupBefore), &srcGroup)
+ assert.NoError(t, err)
+ changedGroup, err := adjustZoneGroupPlacementTargets(srcGroup, zj)
+
+ orig := map[string]interface{}{}
+ jErr := json.Unmarshal([]byte(tt.args.groupBefore), &orig)
+ assert.NoError(t, jErr)
+ assert.EqualValues(t, orig, srcGroup, "src was not modified")
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ return
+ } else {
+ assert.NoError(t, err)
+ }
+ assert.Equal(t, tt.wantChanged, !reflect.DeepEqual(srcGroup, changedGroup))
+ bytes, err := json.Marshal(changedGroup)
+ assert.NoError(t, err)
+ assert.JSONEq(t, tt.wantGroup, string(bytes))
+ })
+ }
+}
+
+func Test_createPlacementTargetsFromZonePoolPlacements(t *testing.T) {
+ type args struct {
+ zone string
+ }
+ tests := []struct {
+ name string
+ args args
+ want map[string]ZonegroupPlacementTarget
+ wantErr bool
+ }{
+ {
+ name: "",
+ args: args{
+ zone: `{
+ "id": "f539c2c0-e1ed-4c42-9294-41742352eeae",
+ "name": "test",
+ "placement_pools": [
+ {
+ "key": "default-placement",
+ "val": {
+ "index_pool": "test.rgw.buckets.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "test.rgw.buckets.data"
+ },
+ "REDUCED_REDUNDANCY": {
+ "data_pool": "test.rgw.buckets.data"
+ }
+ },
+ "data_extra_pool": "test.rgw.buckets.non-ec",
+ "index_type": 5,
+ "inline_data": true
+ }
+ },
+ {
+ "key": "slow",
+ "val": {
+ "index_pool": "slow-meta:test.slow.index",
+ "storage_classes": {
+ "STANDARD": {
+ "data_pool": "slow-data:test.slow.data"
+ }
+ },
+ "data_extra_pool": "slow-non-ec:test.slow.data.non-ec",
+ "index_type": 0,
+ "inline_data": true
+ }
+ }
+ ]
+}`,
+ },
+ want: map[string]ZonegroupPlacementTarget{
+ "default-placement": {
+ Name: "default-placement",
+ StorageClasses: []string{"REDUCED_REDUNDANCY", "STANDARD"},
+ },
+ "slow": {
+ Name: "slow",
+ StorageClasses: []string{"STANDARD"},
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ zo := map[string]interface{}{}
+ _ = json.Unmarshal([]byte(tt.args.zone), &zo)
+ got, err := createPlacementTargetsFromZonePoolPlacements(zo)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("createPlacementTargetsFromZonePoolPlacements() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("createPlacementTargetsFromZonePoolPlacements() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/operator/ceph/object/zone/controller.go b/pkg/operator/ceph/object/zone/controller.go
index 8d787b06340d..e809caaac55e 100644
--- a/pkg/operator/ceph/object/zone/controller.go
+++ b/pkg/operator/ceph/object/zone/controller.go
@@ -289,16 +289,22 @@ func (r *ReconcileObjectZone) createorUpdateCephZone(zone *cephv1.CephObjectZone
func (r *ReconcileObjectZone) createPoolsAndZone(objContext *object.Context, zone *cephv1.CephObjectZone, realmName string, zoneIsMaster bool) error {
// create pools for zone
logger.Debugf("creating pools ceph zone %q", zone.Name)
+ err := object.ValidateObjectStorePoolsConfig(zone.Spec.MetadataPool, zone.Spec.DataPool, zone.Spec.SharedPools)
+ if err != nil {
+ return fmt.Errorf("invalid zone pools config: %w", err)
+ }
+ if object.IsNeedToCreateObjectStorePools(zone.Spec.SharedPools) {
+ err = object.CreateObjectStorePools(objContext, r.clusterSpec, zone.Spec.MetadataPool, zone.Spec.DataPool)
+ if err != nil {
+ return fmt.Errorf("unable to create pools for zone: %w", err)
+ }
+ logger.Debugf("created pools ceph zone %q", zone.Name)
+ }
+
realmArg := fmt.Sprintf("--rgw-realm=%s", realmName)
zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", zone.Spec.ZoneGroup)
zoneArg := fmt.Sprintf("--rgw-zone=%s", zone.Name)
- err := object.ConfigurePools(objContext, r.clusterSpec, zone.Spec.MetadataPool, zone.Spec.DataPool, zone.Spec.SharedPools)
- if err != nil {
- return errors.Wrapf(err, "failed to create pools for zone %v", zone.Name)
- }
- logger.Debugf("created pools ceph zone %q", zone.Name)
-
accessKeyArg, secretKeyArg, err := object.GetRealmKeyArgs(r.opManagerContext, r.context, realmName, zone.Namespace)
if err != nil {
return errors.Wrap(err, "failed to get keys for realm")
@@ -326,6 +332,12 @@ func (r *ReconcileObjectZone) createPoolsAndZone(objContext *object.Context, zon
return errors.Wrapf(err, "failed to configure rados namespaces for zone")
}
+ // Commit rgw zone config changes
+ err = object.CommitConfigChanges(objContext)
+ if err != nil {
+ return errors.Wrapf(err, "failed to commit zone config changes")
+ }
+
return nil
}
@@ -412,6 +424,7 @@ func (r *ReconcileObjectZone) updateStatus(observedGeneration int64, name types.
}
logger.Debugf("object zone %q status updated to %q", name, status)
}
+
func (r *ReconcileObjectZone) deleteZone(objContext *object.Context) error {
realmArg := fmt.Sprintf("--rgw-realm=%s", objContext.Realm)
// zoneGroupArg := fmt.Sprintf("--rgw-zonegroup=%s", objContext.ZoneGroup)
@@ -481,6 +494,7 @@ func decodePoolPrefixfromZone(data string) (string, error) {
s := strings.Split(domain.DomainRoot, ".rgw.")
return s[0], err
}
+
func (r *ReconcileObjectZone) deleteCephObjectZone(zone *cephv1.CephObjectZone, realmName string) (reconcile.Result, error) {
logger.Debugf("deleting zone CR %q", zone.Name)
objContext := object.NewContext(r.context, r.clusterInfo, zone.Name)
From d041be4bcf25d4e67829f9de308e406396cbf8e6 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Fri, 6 Sep 2024 16:07:11 +0200
Subject: [PATCH 04/40] csi: update to new cephcsi release
cephcsi v3.12.2 is the new release;
update Rook to use it.
Signed-off-by: Madhu Rajanna
---
Documentation/Helm-Charts/operator-chart.md | 2 +-
.../Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md | 6 +++---
.../Storage-Configuration/Ceph-CSI/custom-images.md | 2 +-
deploy/charts/rook-ceph/values.yaml | 2 +-
deploy/examples/images.txt | 2 +-
deploy/examples/operator-openshift.yaml | 2 +-
deploy/examples/operator.yaml | 2 +-
pkg/operator/ceph/csi/spec.go | 2 +-
pkg/operator/ceph/csi/util_test.go | 2 +-
9 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index f3816c76c320..7ff471e7df16 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -61,7 +61,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate | `RollingUpdate` |
| `csi.cephFSPluginUpdateStrategyMaxUnavailable` | A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy. | `1` |
| `csi.cephcsi.repository` | Ceph CSI image repository | `"quay.io/cephcsi/cephcsi"` |
-| `csi.cephcsi.tag` | Ceph CSI image tag | `"v3.12.0"` |
+| `csi.cephcsi.tag` | Ceph CSI image tag | `"v3.12.2"` |
| `csi.cephfsLivenessMetricsPort` | CSI CephFS driver metrics port | `9081` |
| `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Deployments and DaemonSets Pods | `nil` |
| `csi.clusterName` | Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster | `nil` |
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
index b904d2da8ab5..d81f55ebf2c2 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
@@ -210,10 +210,10 @@ CSI-Addons supports the following operations:
Ceph-CSI supports encrypting PersistentVolumeClaims (PVCs) for both RBD and CephFS.
This can be achieved using LUKS for RBD and fscrypt for CephFS. More details on encrypting RBD PVCs can be found
-[here](https://github.com/ceph/ceph-csi/blob/v3.12.0/docs/deploy-rbd.md#encryption-for-rbd-volumes),
+[here](https://github.com/ceph/ceph-csi/blob/v3.12.2/docs/deploy-rbd.md#encryption-for-rbd-volumes),
which includes a full list of supported encryption configurations.
-More details on encrypting CephFS PVCs can be found [here](https://github.com/ceph/ceph-csi/blob/v3.12.0/docs/deploy-cephfs.md#cephfs-volume-encryption).
-A sample KMS configmap can be found [here](https://github.com/ceph/ceph-csi/blob/v3.12.0/examples/kms/vault/kms-config.yaml).
+More details on encrypting CephFS PVCs can be found [here](https://github.com/ceph/ceph-csi/blob/v3.12.2/docs/deploy-cephfs.md#cephfs-volume-encryption).
+A sample KMS configmap can be found [here](https://github.com/ceph/ceph-csi/blob/v3.12.2/examples/kms/vault/kms-config.yaml).
!!! note
Not all KMS are compatible with fscrypt. Generally, KMS that either store secrets to use directly (like Vault)
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md b/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
index e3dedf6351c6..8629bbcc1b55 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
@@ -18,7 +18,7 @@ kubectl -n $ROOK_OPERATOR_NAMESPACE edit configmap rook-ceph-operator-config
The default upstream images are included below, which you can change to your desired images.
```yaml
-ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.0"
+ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.2"
ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index 1b931a492693..387712a37e2f 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -487,7 +487,7 @@ csi:
# -- Ceph CSI image repository
repository: quay.io/cephcsi/cephcsi
# -- Ceph CSI image tag
- tag: v3.12.0
+ tag: v3.12.2
registrar:
# -- Kubernetes CSI registrar image repository
diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt
index 12ccd7dfd6ee..faab1198c12c 100644
--- a/deploy/examples/images.txt
+++ b/deploy/examples/images.txt
@@ -2,7 +2,7 @@
gcr.io/k8s-staging-sig-storage/objectstorage-sidecar:v20240513-v0.1.0-35-gefb3255
quay.io/ceph/ceph:v18.2.4
quay.io/ceph/cosi:v0.1.2
- quay.io/cephcsi/cephcsi:v3.12.0
+ quay.io/cephcsi/cephcsi:v3.12.2
quay.io/csiaddons/k8s-sidecar:v0.9.1
registry.k8s.io/sig-storage/csi-attacher:v4.6.1
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 7d0c052d59e2..396f022dd420 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -197,7 +197,7 @@ data:
# The default version of CSI supported by Rook will be started. To change the version
# of the CSI driver to something other than what is officially supported, change
# these images to the desired release of the CSI driver.
- # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.0"
+ # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.2"
# ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
# ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
# ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 7f86068f50d9..1d60f5638559 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -127,7 +127,7 @@ data:
# The default version of CSI supported by Rook will be started. To change the version
# of the CSI driver to something other than what is officially supported, change
# these images to the desired release of the CSI driver.
- # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.0"
+ # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.2"
# ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
# ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
# ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index d63e7bc9da2b..b3033f15cdeb 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -151,7 +151,7 @@ var (
// manually challenging.
var (
// image names
- DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.12.0"
+ DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.12.2"
DefaultRegistrarImage = "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
DefaultProvisionerImage = "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
DefaultAttacherImage = "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
diff --git a/pkg/operator/ceph/csi/util_test.go b/pkg/operator/ceph/csi/util_test.go
index 2308d426e14d..3c8afc0ba40d 100644
--- a/pkg/operator/ceph/csi/util_test.go
+++ b/pkg/operator/ceph/csi/util_test.go
@@ -284,7 +284,7 @@ func Test_getImage(t *testing.T) {
args: args{
data: map[string]string{},
settingName: "ROOK_CSI_CEPH_IMAGE",
- defaultImage: "quay.io/cephcsi/cephcsi:v3.12.0",
+ defaultImage: "quay.io/cephcsi/cephcsi:v3.12.2",
},
want: DefaultCSIPluginImage,
},
From d9d0d8e1d37cdb339aa8f3998d660d418ab83bcb Mon Sep 17 00:00:00 2001
From: Michael Adam
Date: Wed, 28 Aug 2024 10:00:25 +0200
Subject: [PATCH 05/40] ci: rename 'check-helm-docs' to 'check.helm-docs'
This makes the naming of ci-related make targets more systematic and
consistent.
It is based on PR https://github.com/rook/rook/pull/14672
and continues the pattern started in that PR.
Signed-off-by: Michael Adam
---
.github/workflows/docs-check.yml | 2 +-
Makefile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml
index 1a13e842d604..c110b2b7dc07 100644
--- a/.github/workflows/docs-check.yml
+++ b/.github/workflows/docs-check.yml
@@ -43,7 +43,7 @@ jobs:
!Documentation/Helm-Charts
- name: Check helm-docs
- run: make check-helm-docs
+ run: make check.helm-docs
- name: Check docs
run: make check.docs
- name: Install mkdocs and dependencies
diff --git a/Makefile b/Makefile
index 825664cf7937..6fef6948bb21 100644
--- a/Makefile
+++ b/Makefile
@@ -201,7 +201,7 @@ helm-docs: $(HELM_DOCS) ## Use helm-docs to generate documentation from helm cha
-t ../../../Documentation/Helm-Charts/ceph-cluster-chart.gotmpl.md \
-t ../../../Documentation/Helm-Charts/_templates.gotmpl
-check-helm-docs:
+check.helm-docs:
@$(MAKE) helm-docs
@git diff --exit-code || { \
echo "Please run 'make helm-docs' locally, commit the updated docs, and push the change. See https://rook.io/docs/rook/latest/Contributing/documentation/#making-docs" ; \
From a16ad75c929cce19ab29cda2fabea80fd61eb40f Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Fri, 6 Sep 2024 16:59:04 +0530
Subject: [PATCH 06/40] csi: stop deleting csi-operator resources
To keep it simple, we no longer delete the csi-operator resources.
Instead, we document the cleanup for users who want to switch back to
the csi drivers; an illustrative sketch is included below. By the next
release we will support only the csi-operator anyway.
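
For manual cleanup, something like the following should work
(illustrative only; the resource names are assumed from the
ceph-csi-operator v1alpha1 API group csi.ceph.io):

    kubectl -n <cluster-namespace> delete cephconnections.csi.ceph.io --all
    kubectl -n <cluster-namespace> delete clientprofiles.csi.ceph.io --all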
Signed-off-by: subhamkrai
---
pkg/operator/ceph/csi/controller.go | 5 ---
pkg/operator/ceph/csi/operator_config.go | 17 ----------
pkg/operator/ceph/csi/spec.go | 43 ------------------------
3 files changed, 65 deletions(-)
diff --git a/pkg/operator/ceph/csi/controller.go b/pkg/operator/ceph/csi/controller.go
index f5341a48325f..b93fe7a09bb8 100644
--- a/pkg/operator/ceph/csi/controller.go
+++ b/pkg/operator/ceph/csi/controller.go
@@ -150,7 +150,6 @@ var reconcileSaveCSIDriverOptions = SaveCSIDriverOptions
func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, error) {
// reconcileResult is used to communicate the result of the reconciliation back to the caller
var reconcileResult reconcile.Result
- var clusterNamespace string
ownerRef, err := k8sutil.GetDeploymentOwnerReference(r.opManagerContext, r.context.Clientset, os.Getenv(k8sutil.PodNameEnvVar), r.opConfig.OperatorNamespace)
if err != nil {
@@ -295,7 +294,6 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to load cluster info for cluster %q", cluster.Name)
}
clusterInfo.OwnerInfo = k8sutil.NewOwnerInfo(&cephClusters.Items[i], r.scheme)
- clusterNamespace = clusterInfo.Namespace
// is holder enabled for this cluster?
thisHolderEnabled := (!csiHostNetworkEnabled || cluster.Spec.Network.IsMultus()) && !csiDisableHolders
@@ -331,13 +329,10 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to reconcile csi-op config CR")
}
return reconcileResult, nil
- } else {
- r.deleteCSIOperatorResources(clusterNamespace, false)
}
}
if !EnableCSIOperator() {
- r.deleteCSIOperatorResources(clusterNamespace, true)
err = r.validateAndConfigureDrivers(serverVersion, ownerInfo)
if err != nil {
diff --git a/pkg/operator/ceph/csi/operator_config.go b/pkg/operator/ceph/csi/operator_config.go
index 82f27c19a9ba..d1aceecfb698 100644
--- a/pkg/operator/ceph/csi/operator_config.go
+++ b/pkg/operator/ceph/csi/operator_config.go
@@ -176,20 +176,3 @@ func (r *ReconcileCSI) createImageSetConfigmap() (string, error) {
return cm.Name, nil
}
-
-func (r *ReconcileCSI) deleteImageSetConfigMap() error {
- cm := &v1.ConfigMap{}
- err := r.client.Get(r.opManagerContext, types.NamespacedName{Name: cm.Name, Namespace: r.opConfig.OperatorNamespace}, cm)
- if err != nil {
- if kerrors.IsNotFound(err) {
- return nil
- }
- }
- err = r.client.Delete(r.opManagerContext, cm)
- if nil != err {
- return errors.Wrapf(err, "failed to delete imageSet configMap %v", cm.Name)
- }
- logger.Infof("deleted imageSet configMap %q", cm.Name)
-
- return nil
-}
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index 631b0446f01c..6b40437d9059 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -42,7 +42,6 @@ import (
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
- csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes"
)
@@ -759,48 +758,6 @@ func (r *ReconcileCSI) stopDrivers(ver *version.Info) error {
return nil
}
-func (r *ReconcileCSI) deleteCSIOperatorResources(clusterNamespace string, deleteOp bool) {
- csiCephConnection := &csiopv1a1.CephConnection{}
-
- err := r.client.DeleteAllOf(r.opManagerContext, csiCephConnection, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: clusterNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator Ceph Connection %q. %v", csiCephConnection.Name, err)
- } else {
- logger.Infof("deleted CSI-operator Ceph Connection %q", csiCephConnection.Name)
- }
-
- csiOpClientProfile := &csiopv1a1.ClientProfile{}
- err = r.client.DeleteAllOf(r.opManagerContext, csiOpClientProfile, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: clusterNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator client profile %q. %v", csiOpClientProfile.Name, err)
- } else {
- logger.Infof("deleted CSI-operator client profile %q", csiOpClientProfile.Name)
- }
-
- err = r.deleteImageSetConfigMap()
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Error("failed to delete imageSetConfigMap", err)
- }
-
- if deleteOp {
- csiDriver := &csiopv1a1.Driver{}
- err = r.client.DeleteAllOf(r.opManagerContext, csiDriver, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: r.opConfig.OperatorNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator driver config %q. %v", csiDriver.Name, err)
- } else {
- logger.Infof("deleted CSI-operator driver config %q", csiDriver.Name)
- }
-
- opConfig := &csiopv1a1.OperatorConfig{}
- err = r.client.DeleteAllOf(r.opManagerContext, opConfig, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: r.opConfig.OperatorNamespace}})
- if err != nil && !kerrors.IsNotFound(err) {
- logger.Errorf("failed to delete CSI-operator operator config %q. %v", opConfig.Name, err)
- } else {
- logger.Infof("deleted CSI-operator operator config %q", opConfig.Name)
- }
- }
-}
-
func (r *ReconcileCSI) deleteCSIDriverResources(ver *version.Info, daemonset, deployment, service, driverName string) error {
csiDriverobj := v1CsiDriver{}
err := k8sutil.DeleteDaemonset(r.opManagerContext, r.context.Clientset, r.opConfig.OperatorNamespace, daemonset)
From 454ec1eea491fda6ff3535979e114a4110d86532 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Sep 2024 12:06:37 +0000
Subject: [PATCH 07/40] build(deps): bump the github-dependencies group with 2
updates
Bumps the github-dependencies group with 2 updates: [github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring](https://github.com/prometheus-operator/prometheus-operator) and [github.com/prometheus-operator/prometheus-operator/pkg/client](https://github.com/prometheus-operator/prometheus-operator).
Updates `github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring` from 0.76.0 to 0.76.1
- [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases)
- [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.76.0...v0.76.1)
Updates `github.com/prometheus-operator/prometheus-operator/pkg/client` from 0.76.0 to 0.76.1
- [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases)
- [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.76.0...v0.76.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: github-dependencies
- dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/client
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: github-dependencies
...
Signed-off-by: dependabot[bot]
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index 076aa298e388..789a8464d54b 100644
--- a/go.mod
+++ b/go.mod
@@ -31,8 +31,8 @@ require (
github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1
github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c
github.com/pkg/errors v0.9.1
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.0
- github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.0
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1
+ github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1
github.com/rook/rook/pkg/apis v0.0.0-20231204200402-5287527732f7
github.com/sethvargo/go-password v0.3.1
github.com/spf13/cobra v1.8.1
diff --git a/go.sum b/go.sum
index 1faf19988f81..ccec62e8dcf6 100644
--- a/go.sum
+++ b/go.sum
@@ -778,11 +778,11 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.0 h1:tRwEFYFg+To2TGnibGl8dHBCh8Z/BVNKnXj2O5Za/2M=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.0/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1 h1:QU2cs0xxKYvF1JfibP/8vs+pFy6OvIpqNR2lYC4jYNU=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
github.com/prometheus-operator/prometheus-operator/pkg/client v0.46.0/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.0 h1:bJhRd6R4kaYBZpH7cBrzbJpEKJjHx8cbVW1n3dxYnag=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.0/go.mod h1:Nu6G9XLApnqXqunMwMYulcHlaxRwoveH4p4WnZsBHD8=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1 h1:wMPmeRdflJFu14F0YaIiOIYGkBDDKipkeWW0q53d2+s=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1/go.mod h1:7vND+IkdMpZyfSyRs6P5/uXz6BlFDaOj8olErODi8I0=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
From 3b59db4b87145261ca94f2c2d0a6053a6798247b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Sep 2024 12:55:21 +0000
Subject: [PATCH 08/40] build(deps): bump wagoid/commitlint-github-action from
6.1.1 to 6.1.2
Bumps [wagoid/commitlint-github-action](https://github.com/wagoid/commitlint-github-action) from 6.1.1 to 6.1.2.
- [Changelog](https://github.com/wagoid/commitlint-github-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/wagoid/commitlint-github-action/compare/a2bc521d745b1ba127ee2f8b02d6afaa4eed035c...3d28780bbf0365e29b144e272b2121204d5be5f3)
---
updated-dependencies:
- dependency-name: wagoid/commitlint-github-action
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
.github/workflows/commitlint.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml
index 41f543423309..aa2feb26e75d 100644
--- a/.github/workflows/commitlint.yml
+++ b/.github/workflows/commitlint.yml
@@ -31,7 +31,7 @@ jobs:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- - uses: wagoid/commitlint-github-action@a2bc521d745b1ba127ee2f8b02d6afaa4eed035c # v6.1.1
+ - uses: wagoid/commitlint-github-action@3d28780bbf0365e29b144e272b2121204d5be5f3 # v6.1.2
with:
configFile: "./.commitlintrc.json"
helpURL: https://rook.io/docs/rook/latest/Contributing/development-flow/#commit-structure
From 67c40fe5beaddd1d803d3b47c214a462a9d726f4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 9 Sep 2024 12:55:29 +0000
Subject: [PATCH 09/40] build(deps): bump snyk/actions
Bumps [snyk/actions](https://github.com/snyk/actions) from 9213221444c2dc9e8b2502c1e857c26d851e84a7 to cdb760004ba9ea4d525f2e043745dfe85bb9077e.
- [Release notes](https://github.com/snyk/actions/releases)
- [Commits](https://github.com/snyk/actions/compare/9213221444c2dc9e8b2502c1e857c26d851e84a7...cdb760004ba9ea4d525f2e043745dfe85bb9077e)
---
updated-dependencies:
- dependency-name: snyk/actions
dependency-type: direct:production
...
Signed-off-by: dependabot[bot]
---
.github/workflows/snyk.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/snyk.yaml b/.github/workflows/snyk.yaml
index abb671219163..b8c2459ba132 100644
--- a/.github/workflows/snyk.yaml
+++ b/.github/workflows/snyk.yaml
@@ -21,7 +21,7 @@ jobs:
fetch-depth: 0
- name: run Snyk to check for code vulnerabilities
- uses: snyk/actions/golang@9213221444c2dc9e8b2502c1e857c26d851e84a7 # master
+ uses: snyk/actions/golang@cdb760004ba9ea4d525f2e043745dfe85bb9077e # master
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
GOFLAGS: "-buildvcs=false"
From b9188c793aceb6df9ada464c970e44ec2ca036b7 Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Mon, 9 Sep 2024 13:27:54 -0600
Subject: [PATCH 10/40] pool: allow negative step num in crush rule
Crush rules may have a negative step num.
Rook had assumed negative values were not possible,
but they simply had not been encountered previously in
a custom crush rule.
Signed-off-by: Travis Nielsen
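A minimal standalone sketch of why the field type matters (the stepUint and
stepInt types here are illustrative stand-ins for the patched stepSpec, not
Rook code): encoding/json refuses to decode a negative "num" into a uint
field, and a custom crush rule step such as `step chooseleaf firstn -1 type
host` produces exactly such a value when dumped.

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for the stepSpec field before and after the fix.
type stepUint struct {
	Number uint `json:"num"`
}
type stepInt struct {
	Number int `json:"num"`
}

func main() {
	// A step as dumped by `ceph osd crush rule dump`; a negative num is
	// interpreted by CRUSH as "pool size minus |num|" replicas.
	raw := []byte(`{"op":"chooseleaf_firstn","num":-1,"type":"host"}`)

	var u stepUint
	// Fails: json: cannot unmarshal number -1 into ... of type uint
	fmt.Println(json.Unmarshal(raw, &u))

	var i stepInt
	// Succeeds: prints <nil> -1
	fmt.Println(json.Unmarshal(raw, &i), i.Number)
}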
---
pkg/daemon/ceph/client/crush.go | 2 +-
pkg/daemon/ceph/client/crush_rule.go | 2 +-
pkg/daemon/ceph/client/crush_rule_test.go | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/pkg/daemon/ceph/client/crush.go b/pkg/daemon/ceph/client/crush.go
index 2f1c4d4631cd..75bb6fc01caf 100644
--- a/pkg/daemon/ceph/client/crush.go
+++ b/pkg/daemon/ceph/client/crush.go
@@ -75,7 +75,7 @@ type ruleSpec struct {
type stepSpec struct {
Operation string `json:"op"`
- Number uint `json:"num"`
+ Number int `json:"num"`
Item int `json:"item"`
ItemName string `json:"item_name"`
Type string `json:"type"`
diff --git a/pkg/daemon/ceph/client/crush_rule.go b/pkg/daemon/ceph/client/crush_rule.go
index b79d5470ac5c..97575b2a8164 100644
--- a/pkg/daemon/ceph/client/crush_rule.go
+++ b/pkg/daemon/ceph/client/crush_rule.go
@@ -150,7 +150,7 @@ func buildTwoStepCrushSteps(pool cephv1.PoolSpec) []stepSpec {
// Step three
stepTakeSubFailureDomain := &stepSpec{
Operation: "chooseleaf_firstn",
- Number: pool.Replicated.ReplicasPerFailureDomain,
+ Number: int(pool.Replicated.ReplicasPerFailureDomain),
Type: pool.Replicated.SubFailureDomain,
}
steps = append(steps, *stepTakeSubFailureDomain)
diff --git a/pkg/daemon/ceph/client/crush_rule_test.go b/pkg/daemon/ceph/client/crush_rule_test.go
index ce799bfdf138..c48e52b7355d 100644
--- a/pkg/daemon/ceph/client/crush_rule_test.go
+++ b/pkg/daemon/ceph/client/crush_rule_test.go
@@ -56,7 +56,7 @@ func TestBuildCrushSteps(t *testing.T) {
assert.Equal(t, 4, len(steps))
assert.Equal(t, cephv1.DefaultCRUSHRoot, steps[0].ItemName)
assert.Equal(t, "datacenter", steps[1].Type)
- assert.Equal(t, uint(2), steps[2].Number)
+ assert.Equal(t, 2, steps[2].Number)
}
func TestCompileCRUSHMap(t *testing.T) {
From 4af5b6a33133e81b82e920160f74c7fada339881 Mon Sep 17 00:00:00 2001
From: parth-gr
Date: Mon, 9 Sep 2024 16:04:53 +0530
Subject: [PATCH 11/40] doc: add the pv encryption key rotation job
Add the missing CSI-Addons feature to the Rook documentation
Signed-off-by: parth-gr
---
.../Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
index d81f55ebf2c2..dc5df80b8847 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
@@ -200,11 +200,18 @@ CSI-Addons supports the following operations:
* [Creating a ReclaimSpaceCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#reclaimspacecronjob)
* [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-perstentvolumeclaims)
* [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-namespace)
+ * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-storageclass)
* Network Fencing
* [Creating a NetworkFence](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/networkfence.md)
* Volume Replication
* [Creating VolumeReplicationClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/volumereplicationclass.md)
* [Creating VolumeReplication CR](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/volumereplication.md)
+* Key Rotation Job for PV encryption
+ * [Creating EncryptionKeyRotationJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#encryptionkeyrotationjob)
+ * [Creating EncryptionKeyRotationCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#encryptionkeyrotationcronjob)
+ * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-persistentvolumeclaims)
+ * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-namespace)
+ * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-storageclass)
## Enable RBD and CephFS Encryption Support
From 645ae2b86f8ff9323cfd38f14ae4b254bdc87471 Mon Sep 17 00:00:00 2001
From: Blaine Gardner
Date: Wed, 11 Sep 2024 11:05:52 -0600
Subject: [PATCH 12/40] object: add missing codegen from pool placements
When pool placements were recently added to the object store CRD, it
seems that codegen didn't get run, and Rook's CI didn't accurately catch
the codegen issue. This will address adding the missing generated code.
Follow-up work will address fixing CI so that it doesn't continue
missing when codegen needs to be run.
Signed-off-by: Blaine Gardner
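A minimal sketch (illustrative types, not Rook's) of why the slice field
needs the generated deep copy: the old `out.SharedPools = in.SharedPools`
copies only the slice header, so the copy and the original share one backing
array, while DeepCopyInto allocates a fresh slice.

package main

import "fmt"

type PoolPlacement struct{ Name string }

type SharedPools struct{ Placements []PoolPlacement }

func main() {
	in := SharedPools{Placements: []PoolPlacement{{Name: "fast"}}}

	// Shallow struct assignment: the slice header is copied, the backing
	// array is shared, so mutating the copy leaks into the original.
	shallow := in
	shallow.Placements[0].Name = "mutated"
	fmt.Println(in.Placements[0].Name) // "mutated" — original changed too

	// Deep copy: allocate a fresh slice, then copy the elements.
	in.Placements[0].Name = "fast"
	deep := SharedPools{Placements: make([]PoolPlacement, len(in.Placements))}
	copy(deep.Placements, in.Placements)
	deep.Placements[0].Name = "mutated"
	fmt.Println(in.Placements[0].Name) // "fast" — original untouched
}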
---
.../ceph.rook.io/v1/zz_generated.deepcopy.go | 48 ++++++++++++++++++-
1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go
index bd6bf6821cef..6ac445966598 100644
--- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go
@@ -3632,6 +3632,13 @@ func (in *ObjectRealmSpec) DeepCopy() *ObjectRealmSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectSharedPoolsSpec) DeepCopyInto(out *ObjectSharedPoolsSpec) {
*out = *in
+ if in.PoolPlacements != nil {
+ in, out := &in.PoolPlacements, &out.PoolPlacements
+ *out = make([]PoolPlacementSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -3694,7 +3701,7 @@ func (in *ObjectStoreSpec) DeepCopyInto(out *ObjectStoreSpec) {
*out = *in
in.MetadataPool.DeepCopyInto(&out.MetadataPool)
in.DataPool.DeepCopyInto(&out.DataPool)
- out.SharedPools = in.SharedPools
+ in.SharedPools.DeepCopyInto(&out.SharedPools)
in.Gateway.DeepCopyInto(&out.Gateway)
in.Protocols.DeepCopyInto(&out.Protocols)
in.Auth.DeepCopyInto(&out.Auth)
@@ -3876,7 +3883,7 @@ func (in *ObjectZoneSpec) DeepCopyInto(out *ObjectZoneSpec) {
*out = *in
in.MetadataPool.DeepCopyInto(&out.MetadataPool)
in.DataPool.DeepCopyInto(&out.DataPool)
- out.SharedPools = in.SharedPools
+ in.SharedPools.DeepCopyInto(&out.SharedPools)
if in.CustomEndpoints != nil {
in, out := &in.CustomEndpoints, &out.CustomEndpoints
*out = make([]string, len(*in))
@@ -4010,6 +4017,22 @@ func (in PlacementSpec) DeepCopy() PlacementSpec {
return *out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlacementStorageClassSpec) DeepCopyInto(out *PlacementStorageClassSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStorageClassSpec.
+func (in *PlacementStorageClassSpec) DeepCopy() *PlacementStorageClassSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PlacementStorageClassSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PoolMirroringInfo) DeepCopyInto(out *PoolMirroringInfo) {
*out = *in
@@ -4069,6 +4092,27 @@ func (in *PoolMirroringStatusSummarySpec) DeepCopy() *PoolMirroringStatusSummary
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolPlacementSpec) DeepCopyInto(out *PoolPlacementSpec) {
+ *out = *in
+ if in.StorageClasses != nil {
+ in, out := &in.StorageClasses, &out.StorageClasses
+ *out = make([]PlacementStorageClassSpec, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolPlacementSpec.
+func (in *PoolPlacementSpec) DeepCopy() *PoolPlacementSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PoolPlacementSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PoolSpec) DeepCopyInto(out *PoolSpec) {
*out = *in
From 36f6807fbeca45e308599ba35e036e422965294c Mon Sep 17 00:00:00 2001
From: Alexander Trost
Date: Mon, 2 Sep 2024 17:24:18 +0200
Subject: [PATCH 13/40] docs: add grafana dashboards files to docs
Co-authored-by: Travis Nielsen
Signed-off-by: Alexander Trost
---
.../Monitoring/ceph-monitoring.md | 8 +-
.../grafana/Ceph Cluster Dashboard.json | 5679 +++++++++++++++++
.../grafana/Ceph OSD Single Dashboard.json | 1615 +++++
.../grafana/Ceph Pools Dashboard.json | 925 +++
deploy/examples/monitoring/grafana/README.md | 12 +
5 files changed, 8236 insertions(+), 3 deletions(-)
create mode 100644 deploy/examples/monitoring/grafana/Ceph Cluster Dashboard.json
create mode 100644 deploy/examples/monitoring/grafana/Ceph OSD Single Dashboard.json
create mode 100644 deploy/examples/monitoring/grafana/Ceph Pools Dashboard.json
create mode 100644 deploy/examples/monitoring/grafana/README.md
diff --git a/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md b/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
index 8781d91da911..40098c724f5b 100644
--- a/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
+++ b/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
@@ -228,9 +228,11 @@ The dashboards have been created by [@galexrt](https://github.com/galexrt). For
The following Grafana dashboards are available:
-- [Ceph - Cluster](https://grafana.com/grafana/dashboards/2842)
-- [Ceph - OSD (Single)](https://grafana.com/grafana/dashboards/5336)
-- [Ceph - Pools](https://grafana.com/grafana/dashboards/5342)
+- [Ceph - Cluster (ID: 2842)](https://grafana.com/grafana/dashboards/2842)
+- [Ceph - OSD (Single) (ID: 5336)](https://grafana.com/grafana/dashboards/5336)
+- [Ceph - Pools (ID: 5342)](https://grafana.com/grafana/dashboards/5342)
+
+The dashboard JSON files are also available on GitHub in [`deploy/examples/monitoring/grafana/`](https://github.com/rook/rook/tree/master/deploy/examples/monitoring/grafana/).
## Updates and Upgrades
diff --git a/deploy/examples/monitoring/grafana/Ceph Cluster Dashboard.json b/deploy/examples/monitoring/grafana/Ceph Cluster Dashboard.json
new file mode 100644
index 000000000000..c2d012eab11a
--- /dev/null
+++ b/deploy/examples/monitoring/grafana/Ceph Cluster Dashboard.json
@@ -0,0 +1,5679 @@
+{
+ "__inputs": [],
+ "__requires": [
+ {
+ "id": "grafana",
+ "name": "Grafana",
+ "type": "grafana",
+ "version": "5.3.2"
+ },
+ {
+ "id": "graph",
+ "name": "Graph",
+ "type": "panel",
+ "version": "5.0.0"
+ },
+ {
+ "id": "heatmap",
+ "name": "Heatmap",
+ "type": "panel",
+ "version": "5.0.0"
+ },
+ {
+ "id": "singlestat",
+ "name": "Singlestat",
+ "type": "panel",
+ "version": "5.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "showIn": 0,
+ "tags": [],
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Overview of your Ceph cluster.",
+ "editable": false,
+ "gnetId": 2842,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "panels": [
+ {
+ "collapse": false,
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "panels": [],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CLUSTER STATE",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "0": {
+ "text": "HEALTHY"
+ },
+ "1": {
+ "text": "WARNING"
+ },
+ "2": {
+ "text": "ERROR"
+ }
+ },
+ "type": "value"
+ },
+ {
+ "id": 1,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#9ac48a"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 1
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 2
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 0,
+ "y": 1
+ },
+ "id": 3,
+ "interval": "1m",
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_health_status{}",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Ceph health status",
+ "transparent": true,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 1,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ },
+ "unit": "Bps"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 3,
+ "y": 1
+ },
+ "id": 4,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_w_in_bytes{}[5m]))",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Write Throughput",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 1,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#d44a3a"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 0
+ },
+ {
+ "color": "#9ac48a",
+ "value": 0
+ }
+ ]
+ },
+ "unit": "Bps"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 6,
+ "y": 1
+ },
+ "id": 5,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_r_out_bytes{}[5m]))",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Read Throughput",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 0.025
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 1
+ }
+ ]
+ },
+ "unit": "decbytes"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 9,
+ "y": 1
+ },
+ "id": 6,
+ "interval": "1m",
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_cluster_total_bytes{}",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Cluster Capacity",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(245, 54, 54, 0.9)"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 0.1
+ },
+ {
+ "color": "rgba(50, 172, 45, 0.97)",
+ "value": 0.3
+ }
+ ]
+ },
+ "unit": "percentunit"
+ }
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 3,
+ "x": 12,
+ "y": 1
+ },
+ "id": 7,
+ "interval": "1m",
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_cluster_total_bytes{}-ceph_cluster_total_used_bytes{})/ceph_cluster_total_bytes{}",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Available Capacity",
+ "transparent": false,
+ "type": "gauge"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 15,
+ "y": 1
+ },
+ "id": 8,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pool_objects{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of Objects",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 1,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 18,
+ "y": 1
+ },
+ "id": 9,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "delta"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_osd_op_w_in_bytes{})",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Bytes Written",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 1,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 21,
+ "y": 1
+ },
+ "id": 10,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "delta"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_osd_op_r_out_bytes{})",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Bytes Read",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#9ac48a"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 1
+ },
+ {
+ "color": "#e24d42",
+ "value": 1
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 0,
+ "y": 4
+ },
+ "id": 11,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count(ALERTS{alertstate=\"firing\",alertname=~\"^Ceph.+\"}) OR vector(0)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Alerts starting with Ceph",
+ "transparent": true,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ },
+ "unit": "ops"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 3,
+ "y": 4
+ },
+ "id": 12,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_w{}[5m]))",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Write IOPS",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#d44a3a"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 0
+ },
+ {
+ "color": "#9ac48a",
+ "value": 0
+ }
+ ]
+ },
+ "unit": "ops"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 6,
+ "y": 4
+ },
+ "id": 13,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_r{}[5m]))",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Read IOPS",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 0.025
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 0.1
+ }
+ ]
+ },
+ "unit": "decbytes"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 9,
+ "y": 4
+ },
+ "id": 14,
+ "interval": "1m",
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_cluster_total_used_bytes{}",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Used Capacity",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#ea6460"
+ },
+ {
+ "color": "#052b51",
+ "value": 0
+ },
+ {
+ "color": "#508642",
+ "value": 0
+ }
+ ]
+ },
+ "unit": "short"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 15,
+ "y": 4
+ },
+ "id": 15,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "diff"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pool_objects)",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Difference",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 128
+ }
+ ]
+ },
+ "unit": "short"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 18,
+ "y": 4
+ },
+ "id": 16,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_mon_num_sessions{})",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Mon Session Num",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": null
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 2
+ },
+ {
+ "color": "green",
+ "value": 3
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 21,
+ "y": 4
+ },
+ "id": 17,
+ "interval": "1m",
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count(ceph_mon_quorum_status{}) or vector(0)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Monitors In Quorum",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 7
+ },
+ "id": 18,
+ "panels": [],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OSD STATE",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#9ac48a"
+ },
+ {
+ "color": "rgba(237, 40, 40, 0.89)",
+ "value": 1
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 1
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 0,
+ "y": 8
+ },
+ "id": 19,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count(ceph_osd_up{}) - count(ceph_osd_in{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "OSDs OUT",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)"
+ },
+ {
+ "color": "#eab839",
+ "value": 1
+ },
+ {
+ "color": "#ea6460",
+ "value": 1
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 2,
+ "y": 8
+ },
+ "id": 20,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count(ceph_osd_up{} == 0.0) OR vector(0)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "OSDs DOWN",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 4,
+ "y": 8
+ },
+ "id": 21,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_osd_up{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "OSDs UP",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 0,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 6,
+ "y": 8
+ },
+ "id": 22,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "none",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_osd_in{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "OSDs IN",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 1,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 250
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 300
+ }
+ ]
+ },
+ "unit": "none"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 8,
+ "y": 8
+ },
+ "id": 23,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(ceph_osd_numpg{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Avg PGs",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 10
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 50
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 10,
+ "y": 8
+ },
+ "id": 24,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(ceph_osd_apply_latency_ms{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Avg Apply Latency",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 10
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 50
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 13,
+ "y": 8
+ },
+ "id": 25,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(ceph_osd_commit_latency_ms{})",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Avg Commit Latency",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 4,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "color": "#299c46",
+ "text": "0"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#299c46"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 1
+ },
+ {
+ "color": "#d44a3a",
+ "value": 2
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 16,
+ "y": 8
+ },
+ "id": 26,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(rate(ceph_osd_op_w_latency_sum{}[5m]) / rate(ceph_osd_op_w_latency_count{}[5m]) >= 0)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Avg Op Write Latency",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 6,
+ "links": [],
+ "mappings": [
+ {
+ "id": 0,
+ "options": {
+ "match": null,
+ "result": {
+ "color": "#299c46",
+ "text": "0"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#299c46"
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 1
+ },
+ {
+ "color": "#d44a3a",
+ "value": 2
+ }
+ ]
+ },
+ "unit": "ms"
+ }
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 3,
+ "x": 19,
+ "y": 8
+ },
+ "id": 27,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(rate(ceph_osd_op_r_latency_sum{}[5m])/rate(ceph_osd_op_r_latency_count{}[5m]) >= 0)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Avg Op Read Latency",
+ "transparent": false,
+ "type": "stat"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 28,
+ "panels": [],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CLUSTER STATS",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 40,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 0,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 2,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Available"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Total Capacity"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#447EBC",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Used"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#BF1B00",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Used"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#BF1B00",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total_avail"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total_space"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total_used"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#890F02",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Total Capacity"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "custom.lineWidth",
+ "value": 3
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": false,
+ "mode": "normal"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 10
+ },
+ "id": 29,
+ "interval": "$interval",
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_cluster_total_bytes{}-ceph_cluster_total_used_bytes{}",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Available",
+ "refId": "A",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_cluster_total_used_bytes{}",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Used",
+ "refId": "B",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_cluster_total_bytes{}",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Total Capacity",
+ "refId": "C",
+ "step": 300
+ }
+ ],
+ "title": "Capacity",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Total Capacity"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Used"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#BF1B00",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total_avail"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total_space"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total_used"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#890F02",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 10
+ },
+ "id": 30,
+ "interval": "$interval",
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_w{}[5m]))",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Write",
+ "refId": "A",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_r{}[5m]))",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Read",
+ "refId": "B",
+ "step": 300
+ }
+ ],
+ "title": "IOPS",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 16,
+ "y": 10
+ },
+ "id": 31,
+ "interval": "$interval",
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_w_in_bytes{}[5m]))",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Write",
+ "refId": "A",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_op_r_out_bytes{}[5m]))",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Read",
+ "refId": "B",
+ "step": 300
+ }
+ ],
+ "title": "Cluster Throughput",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 18
+ },
+ "id": 32,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_pool_bytes_used{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Pool Used Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 18
+ },
+ "id": 33,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_pool_avail_raw{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}} Avail",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_pool_stored_raw{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}} Stored",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Pool RAW Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 16,
+ "y": 18
+ },
+ "id": 34,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_pool_objects{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Objects Per Pool",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 26
+ },
+ "id": 35,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_pool_quota_bytes{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Pool Quota Bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 8,
+ "y": 26
+ },
+ "id": 36,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(ceph_pool_quota_objects{}) *on (pool_id) group_left(name)(ceph_pool_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{name}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Pool Objects Quota",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 16,
+ "y": 26
+ },
+ "id": 37,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count(ceph_bluestore_kv_commit_lat_count{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "BlueStore",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Type Count",
+ "type": "timeseries"
+ },
+ {
+ "collapse": true,
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 38,
+ "panels": [
+ {
+ "columns": [],
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "decimals": 2,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Time"
+ },
+ "properties": [
+ {
+ "id": "displayName",
+ "value": "Time"
+ },
+ {
+ "id": "unit",
+ "value": "time: YYYY-MM-DD HH:mm:ss"
+ },
+ {
+ "id": "custom.align"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 28
+ },
+ "id": 39,
+ "links": [],
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "9.1.3",
+ "styles": "",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ALERTS{alertstate=\"firing\",alertname=~\"^Ceph.+\"}",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Alerts starting with Ceph",
+ "transformations": [
+ {
+ "id": "merge",
+ "options": {
+ "reducer": []
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "columns": [],
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "filterable": false,
+ "inspect": false
+ },
+ "decimals": 2,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Time"
+ },
+ "properties": [
+ {
+ "id": "displayName",
+ "value": "Time"
+ },
+ {
+ "id": "unit",
+ "value": "time: YYYY-MM-DD HH:mm:ss"
+ },
+ {
+ "id": "custom.align"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 28
+ },
+ "id": 40,
+ "links": [],
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "9.1.3",
+ "styles": "",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "topk(5,sort_desc(ceph_osd_apply_latency_ms{} + ceph_osd_commit_latency_ms{}))",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 1,
+          "legendFormat": "__auto",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top Sluggish OSDs",
+ "transformations": [
+ {
+ "id": "merge",
+ "options": {
+ "reducer": []
+ }
+ }
+ ],
+ "type": "table"
+ },
+ {
+ "columns": [],
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "displayMode": "auto",
+ "inspect": false
+ },
+ "decimals": 2,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Time"
+ },
+ "properties": [
+ {
+ "id": "displayName",
+ "value": "Time"
+ },
+ {
+ "id": "unit",
+ "value": "time: YYYY-MM-DD HH:mm:ss"
+ },
+ {
+ "id": "custom.align"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 28
+ },
+ "id": 41,
+ "links": [],
+ "options": {
+ "footer": {
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "9.1.3",
+ "styles": "",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_osd_up{} == 0",
+ "format": "table",
+ "instant": true,
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Down OSDs",
+ "transformations": [
+ {
+ "id": "merge",
+ "options": {
+ "reducer": []
+ }
+ }
+ ],
+ "type": "table"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Alerts",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": true,
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 42,
+ "panels": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+              "log": 2,
+ "type": "log"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 29
+ },
+ "id": 43,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "node_memory_Active_anon_bytes{}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(node_memory_Active_anon_bytes{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Cluster Memory Usage",
+ "refId": "B"
+ }
+ ],
+ "title": "Node Memory Usage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+              "log": 2,
+ "type": "log"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 29
+ },
+ "id": 44,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg by(instance)(irate(node_cpu_seconds_total{job='node',mode!=\"idle\"}[$interval])) * 100",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Node CPU Usage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 35
+ },
+ "id": 45,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum by (instance)(irate(node_disk_read_bytes_total{}[$interval]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Node Out",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 35
+ },
+ "id": 46,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum by (instance)(irate(node_disk_written_bytes_total{}[$interval]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Node In",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 44
+ },
+ "id": 47,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "(node_filesystem_free_bytes{ mountpoint=\"/\", device != \"rootfs\"})*100 / (node_filesystem_size_bytes{ mountpoint=\"/\", device != \"rootfs\"})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+      "title": "Free Space in Root Filesystem",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Node Statistics (NodeExporter)",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 45
+ },
+ "id": 48,
+ "panels": [],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OBJECTS",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/^Total.*$/"
+ },
+ "properties": [
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": false,
+ "mode": "normal"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 6,
+ "x": 0,
+ "y": 46
+ },
+ "id": 49,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "asc"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pool_objects)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Total",
+ "range": true,
+ "refId": "A",
+ "step": 300
+ }
+ ],
+      "title": "Objects in the Cluster",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/^Total.*$/"
+ },
+ "properties": [
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": false,
+ "mode": "normal"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 8,
+ "x": 6,
+ "y": 46
+ },
+ "id": 50,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "asc"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_active{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Active",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_clean{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Clean",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_peering{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Peering",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_degraded{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Degraded",
+ "range": true,
+ "refId": "D",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_stale{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Stale",
+ "range": true,
+ "refId": "E",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_unclean_pgs{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Unclean",
+ "range": true,
+ "refId": "F",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_undersized{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Undersized",
+ "range": true,
+ "refId": "G",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_incomplete{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Incomplete",
+ "range": true,
+ "refId": "H"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_forced_backfill{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Forced Backfill",
+ "range": true,
+ "refId": "I"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_forced_recovery{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Forced Recovery",
+ "range": true,
+ "refId": "J"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_creating{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Creating",
+ "range": true,
+ "refId": "K"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_wait_backfill{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Wait Backfill",
+ "range": true,
+ "refId": "L"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_deep{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Deep",
+ "range": true,
+ "refId": "M"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_scrubbing{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Scrubbing",
+ "range": true,
+ "refId": "N"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_recovering{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Recovering",
+ "range": true,
+ "refId": "O"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_repair{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Repair",
+ "range": true,
+ "refId": "P"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_down{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Down",
+ "range": true,
+ "refId": "Q"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_peered{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Peered",
+ "range": true,
+ "refId": "R"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_backfill{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Backfill",
+ "range": true,
+ "refId": "S"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_remapped{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Remapped",
+ "range": true,
+ "refId": "T"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_backfill_toofull{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Backfill Toofull",
+ "range": true,
+ "refId": "U"
+ }
+ ],
+ "title": "PGs State",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/^Total.*$/"
+ },
+ "properties": [
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": false,
+ "mode": "normal"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 10,
+ "x": 14,
+ "y": 46
+ },
+ "id": 51,
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "asc"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_degraded{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Degraded",
+ "range": true,
+ "refId": "A",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_stale{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Stale",
+ "range": true,
+ "refId": "B",
+ "step": 300
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(ceph_pg_undersized{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Undersized",
+ "range": true,
+ "refId": "C",
+ "step": 300
+ }
+ ],
+ "title": "Stuck PGs",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 10,
+ "x": 14,
+ "y": 52
+ },
+ "id": 52,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "sum(irate(ceph_osd_recovery_ops{}[$interval]))",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "OPS",
+ "refId": "A",
+ "step": 300
+ }
+ ],
+ "title": "Recovery Operations",
+ "type": "timeseries"
+ },
+ {
+ "collapse": false,
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 53
+ },
+ "id": 53,
+ "panels": [],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "LATENCY",
+ "titleSize": "h6",
+ "type": "row"
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 54
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 54,
+ "legend": {
+ "show": true
+ },
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ },
+ "value": "1"
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_osd_apply_latency_ms{}",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Apply Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": "",
+ "yAxis": {
+ "decimals": null,
+ "format": "ms",
+ "logBase": 2,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": 1
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": 10
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#65c5db",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 54
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 55,
+ "legend": {
+ "show": true
+ },
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ }
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#65c5db",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "ceph_osd_commit_latency_ms{}",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Commit Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": "",
+ "yAxis": {
+ "decimals": null,
+ "format": "ms",
+ "logBase": 2,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": 1
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#806eb7",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 62
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 56,
+ "legend": {
+ "show": true
+ },
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ }
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#806eb7",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 2,
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "rate(ceph_osd_op_r_latency_sum{}[5m]) / rate(ceph_osd_op_r_latency_count{}[5m]) >= 0",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Read Op Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": "",
+ "yAxis": {
+ "decimals": null,
+ "format": "ms",
+ "logBase": 2,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": 1
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#f9934e",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ }
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 62
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 57,
+ "legend": {
+ "show": true
+ },
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ }
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#f9934e",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 2,
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "rate(ceph_osd_op_w_latency_sum{}[5m]) / rate(ceph_osd_op_w_latency_count{}[5m]) >= 0",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Write Op Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": "",
+ "yAxis": {
+ "decimals": 2,
+ "format": "ms",
+ "logBase": 2,
+ "max": null,
+ "min": "0",
+ "show": true,
+ "splitFactor": 1
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 70
+ },
+ "id": 58,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(rate(ceph_osd_op_r_latency_sum{}[5m]) / rate(ceph_osd_op_r_latency_count{}[5m]) >= 0)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Read",
+ "refId": "A"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(rate(ceph_osd_op_w_latency_sum{}[5m]) / rate(ceph_osd_op_w_latency_count{}[5m]) >= 0)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "Write",
+ "refId": "B"
+ }
+ ],
+      "title": "AVG OSD Read/Write Op Latency",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 70
+ },
+ "id": 59,
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(ceph_osd_apply_latency_ms{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "apply",
+ "metric": "ceph_osd_perf_apply_latency_seconds",
+ "refId": "A",
+ "step": 4
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "avg(ceph_osd_commit_latency_ms{})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "commit",
+ "metric": "ceph_osd_perf_commit_latency_seconds",
+ "refId": "B",
+ "step": 4
+ }
+ ],
+ "title": "AVG OSD Apply + Commit Latency",
+ "type": "timeseries"
+ },
+ {
+ "collapse": true,
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 71
+ },
+ "id": 60,
+ "panels": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 6,
+ "x": 0,
+ "y": 72
+ },
+ "id": 61,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count by (ceph_version)(ceph_osd_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Ceph OSD Versions",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 6,
+ "x": 6,
+ "y": 72
+ },
+ "id": 62,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count by (ceph_version)(ceph_mon_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Ceph Mon Versions",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 6,
+ "x": 12,
+ "y": 72
+ },
+ "id": 63,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count by (ceph_version)(ceph_mds_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Ceph MDS Versions",
+ "type": "timeseries"
+ },
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 6,
+ "x": 18,
+ "y": 72
+ },
+ "id": 64,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": "${DS_PROMETHEUS}",
+ "expr": "count by (ceph_version)(ceph_rgw_metadata{})",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Ceph RGW Versions",
+ "type": "timeseries"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Ceph Versions",
+ "titleSize": "h6",
+ "type": "row"
+ }
+ ],
+ "refresh": "1m",
+ "rows": [],
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [
+ "ceph-mixin"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "label": "Data Source",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "auto": true,
+ "auto_count": 10,
+ "auto_min": "1m",
+ "current": {
+ "text": "$__auto_interval_interval",
+ "value": "$__auto_interval_interval"
+ },
+ "hide": 0,
+ "label": "Interval",
+ "name": "interval",
+ "options": [
+ {
+ "selected": true,
+ "text": "auto",
+ "value": "$__auto_interval_interval"
+ },
+ {
+ "selected": false,
+ "text": "5s",
+ "value": "5s"
+ },
+ {
+ "selected": false,
+ "text": "10s",
+ "value": "10s"
+ },
+ {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "5s,10s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "refresh": 2,
+ "type": "interval",
+ "valuelabels": {}
+ }
+ ]
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Ceph Cluster",
+ "uid": "tbO9LAiZK",
+ "version": 0
+}
diff --git a/deploy/examples/monitoring/grafana/Ceph OSD Single Dashboard.json b/deploy/examples/monitoring/grafana/Ceph OSD Single Dashboard.json
new file mode 100644
index 000000000000..1c5f9e51c495
--- /dev/null
+++ b/deploy/examples/monitoring/grafana/Ceph OSD Single Dashboard.json
@@ -0,0 +1,1615 @@
+{
+ "__inputs": [],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.0.5"
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "CEPH OSD Status.",
+ "editable": false,
+ "fiscalYearStartMonth": 0,
+ "gnetId": 5336,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 11,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Status / Total OSDs",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [
+ {
+ "options": {
+ "from": 0,
+ "result": {
+ "index": 0,
+ "text": "N/A"
+ },
+ "to": 0
+ },
+ "type": "range"
+ },
+ {
+ "options": {
+ "from": 0,
+ "result": {
+ "index": 1,
+ "text": "DOWN"
+ },
+ "to": 0.99
+ },
+ "type": "range"
+ },
+ {
+ "options": {
+ "from": 0.99,
+ "result": {
+ "index": 2,
+ "text": "UP"
+ },
+ "to": 1
+ },
+ "type": "range"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": null
+ },
+ {
+ "color": "rgba(237, 40, 40, 0.89)",
+ "value": 0
+ },
+ {
+ "color": "rgba(50, 172, 45, 0.97)",
+ "value": 1
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 0,
+ "y": 1
+ },
+ "id": 6,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.0.5",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum without (instance) (ceph_osd_up{ceph_daemon=\"$osd\"})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "range": true,
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "title": "Status",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [
+ {
+ "options": {
+ "from": 0,
+ "result": {
+ "index": 0,
+ "text": "N/A"
+ },
+ "to": 0
+ },
+ "type": "range"
+ },
+ {
+ "options": {
+ "from": 0,
+ "result": {
+ "index": 1,
+ "text": "OUT"
+ },
+ "to": 0.99
+ },
+ "type": "range"
+ },
+ {
+ "options": {
+ "from": 0.99,
+ "result": {
+ "index": 2,
+ "text": "IN"
+ },
+ "to": 1
+ },
+ "type": "range"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": null
+ },
+ {
+ "color": "rgba(237, 40, 40, 0.89)",
+ "value": 0
+ },
+ {
+ "color": "rgba(50, 172, 45, 0.97)",
+ "value": 1
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 2,
+ "y": 1
+ },
+ "id": 8,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.0.5",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance) (ceph_osd_in{ceph_daemon=\"$osd\"})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "title": "Available",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [
+ {
+ "options": {
+ "from": 0,
+ "result": {
+ "index": 0,
+ "text": "N/A"
+ },
+ "to": 0
+ },
+ "type": "range"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(255, 255, 255, 0.9)",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 4,
+ "y": 1
+ },
+ "id": 10,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.0.5",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "count without (instance, ceph_daemon) (ceph_osd_up)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "title": "Total OSDs",
+ "type": "stat"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 4
+ },
+ "id": 12,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "OSD: $osd",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "transparent",
+ "value": null
+ },
+ {
+ "color": "rgba(216, 200, 27, 0.27)",
+ "value": 250
+ },
+ {
+ "color": "rgba(234, 112, 112, 0.22)",
+ "value": 300
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/^Average.*/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": false,
+ "mode": "normal"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 11,
+ "x": 0,
+ "y": 5
+ },
+ "id": 5,
+ "interval": "$interval",
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance, ceph_daemon) (ceph_osd_numpg{ceph_daemon=~\"$osd\"})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Number of PGs",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "avg without (instance, ceph_daemon) (ceph_osd_numpg)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Average Number of PGs in the Cluster",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "title": "PGs",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 9,
+ "x": 11,
+ "y": 5
+ },
+ "id": 2,
+ "interval": "$interval",
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+          "expr": "sum without (instance, ceph_daemon) (ceph_osd_stat_bytes{ceph_daemon=~\"$osd\"}-ceph_osd_stat_bytes_used{ceph_daemon=~\"$osd\"})",
+ "format": "time_series",
+ "hide": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Available",
+ "metric": "ceph_osd_avail_bytes",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance, ceph_daemon) (ceph_osd_stat_bytes_used{ceph_daemon=~\"$osd\"})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Used",
+ "metric": "ceph_osd_avail_bytes",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "title": "OSD Storage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "rgba(50, 172, 45, 0.97)",
+ "value": null
+ },
+ {
+ "color": "rgba(237, 129, 40, 0.89)",
+ "value": 60
+ },
+ {
+ "color": "rgba(245, 54, 54, 0.9)",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 4,
+ "x": 20,
+ "y": 5
+ },
+ "id": 7,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.0.5",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+          "expr": "sum without (instance, ceph_daemon) (ceph_osd_stat_bytes_used{ceph_daemon=~\"$osd\"}/ceph_osd_stat_bytes{ceph_daemon=~\"$osd\"})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "title": "Utilization",
+ "type": "gauge"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 12
+ },
+ "id": 13,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+ },
+ "refId": "A"
+ }
+ ],
+      "title": "Latency, Storage, Utilization Variance",
+ "type": "row"
+ },
+ {
+ "cards": {},
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 13
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 83,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ },
+ "value": "1"
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance) (ceph_osd_apply_latency_ms{ceph_daemon='$osd'})",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Apply Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketSize": "",
+ "yAxis": {
+ "format": "ms",
+ "logBase": 2,
+ "min": "0",
+ "show": true,
+ "splitFactor": 1
+ },
+ "yBucketBound": "auto",
+ "yBucketSize": 10
+ },
+ {
+ "cards": {},
+ "color": {
+ "cardColor": "#65c5db",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 13
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 84,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ }
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#65c5db",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance) (ceph_osd_commit_latency_ms{ceph_daemon='$osd'})",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Commit Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketSize": "",
+ "yAxis": {
+ "format": "ms",
+ "logBase": 2,
+ "min": "0",
+ "show": true
+ },
+ "yBucketBound": "auto"
+ },
+ {
+ "cards": {},
+ "color": {
+ "cardColor": "#806eb7",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 21
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 85,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ }
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#806eb7",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 2,
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance) (rate(ceph_osd_op_r_latency_sum{ceph_daemon='$osd'}[5m]) / rate(ceph_osd_op_r_latency_count{ceph_daemon='$osd'}[5m]) >= 0)",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Read Op Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketSize": "",
+ "yAxis": {
+ "decimals": 2,
+ "format": "ms",
+ "logBase": 2,
+ "min": "0",
+ "show": true
+ },
+ "yBucketBound": "auto"
+ },
+ {
+ "cards": {},
+ "color": {
+ "cardColor": "#f9934e",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "opacity"
+ },
+ "dataFormat": "timeseries",
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 21
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 86,
+ "legend": {
+ "show": true
+ },
+ "links": [],
+ "options": {
+ "calculate": true,
+ "calculation": {
+ "yBuckets": {
+ "mode": "count",
+ "scale": {
+ "log": 2,
+ "type": "log"
+ }
+ }
+ },
+ "cellGap": 2,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#f9934e",
+ "mode": "opacity",
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": false
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 2,
+ "min": "0",
+ "reverse": false,
+ "unit": "ms"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance) (rate(ceph_osd_op_w_latency_sum{ceph_daemon='$osd'}[5m]) / rate(ceph_osd_op_w_latency_count{ceph_daemon='$osd'}[5m]) >= 0)",
+ "format": "time_series",
+ "instant": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "OSD Write Op Latency Distribution",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketSize": "",
+ "yAxis": {
+ "decimals": 2,
+ "format": "ms",
+ "logBase": 2,
+ "min": "0",
+ "show": true
+ },
+ "yBucketBound": "auto"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 29
+ },
+ "id": 44,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "avg without (instance, ceph_daemon) (rate(ceph_osd_op_r_latency_sum{ceph_daemon='$osd'}[5m]) / rate(ceph_osd_op_r_latency_count{ceph_daemon='$osd'}[5m]) >= 0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "read",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "avg without (instance, ceph_daemon) (rate(ceph_osd_op_w_latency_sum{ceph_daemon='$osd'}[5m]) / rate(ceph_osd_op_w_latency_count{ceph_daemon='$osd'}[5m]) >= 0)",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "write",
+ "refId": "B"
+ }
+ ],
+ "title": "Avg OSD Op Latency",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 29
+ },
+ "id": 35,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "avg without (instance, ceph_daemon) (ceph_osd_apply_latency_ms{ceph_daemon='$osd'})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "apply",
+ "metric": "ceph_osd_perf_apply_latency_seconds",
+ "refId": "A",
+ "step": 4
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "avg without (instance, ceph_daemon)(ceph_osd_commit_latency_ms{ceph_daemon='$osd'})",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "commit",
+ "metric": "ceph_osd_perf_commit_latency_seconds",
+ "refId": "B",
+ "step": 4
+ }
+ ],
+ "title": "AVG OSD Apply + Commit Latency",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "5m",
+ "schemaVersion": 36,
+ "style": "dark",
+ "tags": [
+ "ceph",
+ "osd"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": "Data source",
+ "multi": false,
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "auto": true,
+ "auto_count": 10,
+ "auto_min": "1m",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "datasource": "Prometheus",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Interval",
+ "multi": false,
+ "name": "interval",
+ "options": [
+ {
+ "selected": false,
+ "text": "auto",
+ "value": "$__auto_interval_interval"
+ },
+ {
+ "selected": false,
+ "text": "10s",
+ "value": "10s"
+ },
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "10s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(ceph_osd_up, ceph_daemon)",
+ "hide": 0,
+ "includeAll": false,
+ "label": "OSD",
+ "multi": false,
+ "name": "osd",
+ "options": [],
+ "query": {
+ "query": "label_values(ceph_osd_up, ceph_daemon)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-3h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Ceph - OSD (Single)",
+ "uid": "Fj5fAfzik",
+ "version": 2,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/deploy/examples/monitoring/grafana/Ceph Pools Dashboard.json b/deploy/examples/monitoring/grafana/Ceph Pools Dashboard.json
new file mode 100644
index 000000000000..3ea91bb90167
--- /dev/null
+++ b/deploy/examples/monitoring/grafana/Ceph Pools Dashboard.json
@@ -0,0 +1,925 @@
+{
+ "__inputs": [],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.0.5"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Ceph Pools dashboard.",
+ "editable": false,
+ "fiscalYearStartMonth": 0,
+ "gnetId": 5342,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 11,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Pool: $pool",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 30,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/^Total.*$/"
+ },
+ "properties": [
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "custom.lineWidth",
+ "value": 4
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": false,
+ "mode": "normal"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": "/^Raw.*$/"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#BF1B00",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ },
+ {
+ "id": "custom.lineWidth",
+ "value": 4
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 20,
+ "x": 0,
+ "y": 1
+ },
+ "id": 2,
+ "interval": "$interval",
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_max_avail) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Available - {{ name }}",
+ "metric": "ceph_pool_available_bytes",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_stored) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Stored - {{ name }}",
+ "metric": "ceph_pool",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_stored + ceph_pool_max_avail) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "hide": true,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Total - {{ name }}",
+ "metric": "ceph_pool",
+ "refId": "C",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_stored_raw) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Raw - {{ name }}",
+ "metric": "ceph_pool",
+ "refId": "D",
+ "step": 60
+ }
+ ],
+ "title": "Pool Storage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "decimals": 2,
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "text": "N/A"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "max": 1,
+ "min": 0,
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "yellow",
+ "value": 75
+ },
+ {
+ "color": "red",
+ "value": 90
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 4,
+ "x": 20,
+ "y": 1
+ },
+ "id": 10,
+ "links": [],
+ "maxDataPoints": 100,
+ "options": {
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.0.5",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum without (instance, pool_id, name) ((ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail)) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"}))",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "title": "Usage",
+ "type": "gauge"
+ },
+ {
+ "collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 12,
+ "panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "refId": "A"
+ }
+ ],
+ "title": "Pool Info",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 7,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_objects) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Objects - {{ name }}",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_dirty) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Dirty Objects - {{ name }}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((ceph_pool_quota_objects) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Quota Objects - {{ name }}",
+ "refId": "C"
+ }
+ ],
+ "title": "Objects in Pool",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "IOPS",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 4,
+ "interval": "$interval",
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((irate(ceph_pool_rd[3m])) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Read - {{ name }}",
+ "refId": "B",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((irate(ceph_pool_wr[3m])) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Write - {{ name }}",
+ "refId": "A",
+ "step": 60
+ }
+ ],
+ "title": "IOPS",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 2,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": true,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 17
+ },
+ "id": 5,
+ "interval": "$interval",
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max",
+ "min"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.1.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((irate(ceph_pool_rd_bytes[5m])) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Read Bytes - {{ name }}",
+ "refId": "A",
+ "step": 60
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum((irate(ceph_pool_wr_bytes[5m])) *on (pool_id) group_left(name)(ceph_pool_metadata{name=~\"^$pool$\"})) by (name)",
+ "format": "time_series",
+ "interval": "$interval",
+ "intervalFactor": 1,
+ "legendFormat": "Written Bytes - {{ name }}",
+ "refId": "B",
+ "step": 60
+ }
+ ],
+ "title": "Throughput",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "1m",
+ "schemaVersion": 36,
+ "style": "dark",
+ "tags": [
+ "ceph",
+ "pools"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "Prometheus",
+ "value": "Prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": "Data source",
+ "multi": false,
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "auto": true,
+ "auto_count": 10,
+ "auto_min": "1m",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "datasource": "Prometheus",
+ "hide": 0,
+ "includeAll": false,
+ "label": "Interval",
+ "multi": false,
+ "name": "interval",
+ "options": [
+ {
+ "selected": false,
+ "text": "auto",
+ "value": "$__auto_interval_interval"
+ },
+ {
+ "selected": false,
+ "text": "10s",
+ "value": "10s"
+ },
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "10s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": ".*",
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(ceph_pool_metadata, name)",
+ "hide": 0,
+ "includeAll": true,
+ "label": "Pool",
+ "multi": true,
+ "name": "pool",
+ "options": [],
+ "query": {
+ "query": "label_values(ceph_pool_metadata, name)",
+ "refId": "Prometheus-pool-Variable-Query"
+ },
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 3,
+ "tagValuesQuery": "",
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-3h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Ceph - Pools",
+ "uid": "-gtf0Bzik",
+ "version": 2,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/deploy/examples/monitoring/grafana/README.md b/deploy/examples/monitoring/grafana/README.md
new file mode 100644
index 000000000000..225dd3566728
--- /dev/null
+++ b/deploy/examples/monitoring/grafana/README.md
@@ -0,0 +1,12 @@
+# Grafana Dashboards
+
+This folder contains the JSON files for the Grafana dashboards.
+The dashboards are based upon [the official Ceph Grafana dashboards](https://github.com/ceph/ceph/tree/main/monitoring/ceph-mixin) but with some slight tweaks.
+
+## Updating the Dashboards
+
+To update the dashboards, please export them via Grafana's built-in export function.
+
+Please note that when exporting a dashboard from Grafana, that version of Grafana becomes the minimum required version for all users. For example, a dashboard exported from Grafana 9.1.0 requires all users to run at least Grafana 9.1.0.
+
+Keep this in mind when updating the dashboards.
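As an aside, a dashboard's JSON can also be retrieved over Grafana's HTTP API (`GET /api/dashboards/uid/:uid`) rather than through the UI export dialog. The following minimal Go sketch illustrates the idea; the Grafana URL, the `GRAFANA_TOKEN` environment variable, and the hardcoded uid (`tbO9LAiZK`, the Ceph Cluster dashboard added above) are assumptions about a local setup.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Fetch the dashboard JSON by uid from a local Grafana instance.
	req, err := http.NewRequest("GET", "http://localhost:3000/api/dashboards/uid/tbO9LAiZK", nil)
	if err != nil {
		panic(err)
	}
	// Grafana API / service-account tokens are passed as a Bearer header.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("GRAFANA_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```

Note that the API response wraps the dashboard in a `{"dashboard": ..., "meta": ...}` envelope and does not add the `__inputs`/`__requires` header that the UI export produces, so the UI export remains the canonical way to update these files.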
From 325950239b347ff8a2fee46613e04ad653a99c2b Mon Sep 17 00:00:00 2001
From: Alexander Trost
Date: Wed, 4 Sep 2024 15:31:36 +0200
Subject: [PATCH 14/40] ci: code spell ignore grafana dashboard json files
Signed-off-by: Alexander Trost
---
.github/workflows/codespell.yaml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/codespell.yaml b/.github/workflows/codespell.yaml
index 961dcd7d23c7..78f387374c30 100644
--- a/.github/workflows/codespell.yaml
+++ b/.github/workflows/codespell.yaml
@@ -34,7 +34,8 @@ jobs:
# in other places, so ignore the file itself assuming it is correct
# crds.yaml, resources.yaml: CRD files are fully generated from content we control (should
# be flagged elsewhere) and content we don't control (can't fix easily), so ignore
- skip: .git,*.png,*.jpg,*.svg,*.sum,./LICENSE,./deploy/examples/crds.yaml,./deploy/charts/rook-ceph/templates/resources.yaml,./deploy/examples/csi-operator.yaml
+      # *Dashboard.json: Grafana dashboards that are generated by exporting them from Grafana.
+ skip: .git,*.png,*.jpg,*.svg,*.sum,./LICENSE,./deploy/examples/crds.yaml,./deploy/charts/rook-ceph/templates/resources.yaml,./deploy/examples/csi-operator.yaml,*Dashboard.json
# aks: Amazon Kubernetes Service
# keyserver: flag to apt-key
# atleast: codespell wants to flag any 'AtLeast' method
From 55d985409d810233cfa36ae29d29dffb6e7a55c6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:21:44 +0000
Subject: [PATCH 15/40] build(deps): bump github/codeql-action from 3.26.6 to
3.26.7
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.6 to 3.26.7.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/4dd16135b69a43b6c8efb853346f8437d92d3c93...8214744c546c1e5c8f03dde8fab3a7353211988d)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
.github/workflows/scorecards.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 315c4b6d3789..2a8dca108edb 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -64,6 +64,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+ uses: github/codeql-action/upload-sarif@8214744c546c1e5c8f03dde8fab3a7353211988d # v3.26.7
with:
sarif_file: results.sarif
From 609634e01954a4ef8e9cfbf0eb3c2affba8fc6c9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:21:47 +0000
Subject: [PATCH 16/40] build(deps): bump DavidAnson/markdownlint-cli2-action
Bumps [DavidAnson/markdownlint-cli2-action](https://github.com/davidanson/markdownlint-cli2-action) from 16.0.0 to 17.0.0.
- [Release notes](https://github.com/davidanson/markdownlint-cli2-action/releases)
- [Commits](https://github.com/davidanson/markdownlint-cli2-action/compare/b4c9feab76d8025d1e83c653fa3990936df0e6c8...db43aef879112c3119a410d69f66701e0d530809)
---
updated-dependencies:
- dependency-name: DavidAnson/markdownlint-cli2-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
.github/workflows/docs-check.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml
index c110b2b7dc07..6a61cd4adc38 100644
--- a/.github/workflows/docs-check.yml
+++ b/.github/workflows/docs-check.yml
@@ -36,7 +36,7 @@ jobs:
with:
python-version: 3.9
- - uses: DavidAnson/markdownlint-cli2-action@b4c9feab76d8025d1e83c653fa3990936df0e6c8 # v16.0.0
+ - uses: DavidAnson/markdownlint-cli2-action@db43aef879112c3119a410d69f66701e0d530809 # v17.0.0
with:
globs: |
Documentation/**/*.md
From b2c39ace252b4548253c035efdcafff23ed7dd8b Mon Sep 17 00:00:00 2001
From: Artem Torubarov
Date: Mon, 16 Sep 2024 14:51:49 +0200
Subject: [PATCH 17/40] exporter: move config to cluster CRD
allows setting the prio-limit and stats-period options on the ceph exporter
Signed-off-by: Artem Torubarov
---
.../CRDs/Cluster/ceph-cluster-crd.md | 3 ++
Documentation/CRDs/specification.md | 53 +++++++++++++++++++
.../charts/rook-ceph/templates/resources.yaml | 14 +++++
deploy/examples/cluster.yaml | 9 ++++
deploy/examples/crds.yaml | 14 +++++
pkg/apis/ceph.rook.io/v1/types.go | 14 +++++
.../ceph/cluster/nodedaemon/exporter.go | 11 ++--
.../ceph/cluster/nodedaemon/exporter_test.go | 20 +++++++
8 files changed, 135 insertions(+), 3 deletions(-)
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index 67a4d3945de7..29ded5c34a8b 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -52,6 +52,9 @@ If this value is empty, each pod will get an ephemeral directory to store their
* `externalMgrPrometheusPort`: external prometheus manager module port. See [external cluster configuration](./external-cluster/external-cluster.md) for more details.
* `port`: The internal prometheus manager module port where the prometheus mgr module listens. The port may need to be configured when host networking is enabled.
* `interval`: The interval for the prometheus module to scrape targets.
+ * `exporter`: Ceph exporter metrics config.
+    * `perfCountersPrioLimit`: Specifies which performance counters are exported. Corresponds to the `--prio-limit` Ceph exporter flag. A value of `0` exports all counters; the default is `5`.
+    * `statsPeriodSeconds`: Time in seconds to wait between requests to the exporter server. Corresponds to the `--stats-period` Ceph exporter flag. The default is `5`.
* `network`: For the network settings for the cluster, refer to the [network configuration settings](#network-configuration-settings)
* `mon`: contains mon related options [mon settings](#mon-settings)
For more details on the mons and when to choose a number other than `3`, see the [mon health doc](../../Storage-Configuration/Advanced/ceph-mon-health.md).
diff --git a/Documentation/CRDs/specification.md b/Documentation/CRDs/specification.md
index 662c8575c97f..ebd043d22f6e 100644
--- a/Documentation/CRDs/specification.md
+++ b/Documentation/CRDs/specification.md
@@ -3686,6 +3686,45 @@ map[string]int
+CephExporterSpec
+
+
+(Appears on: MonitoringSpec )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+perfCountersPrioLimit
+
+int64
+
+
+
+Only performance counters greater than or equal to this option are fetched
+
+
+
+
+statsPeriodSeconds
+
+int64
+
+
+
+Time to wait before sending requests again to exporter server (seconds)
+
+
+
+
CephFilesystemStatus
@@ -8527,6 +8566,20 @@ Kubernetes meta/v1.Duration
Interval determines prometheus scrape interval
+
+
+exporter
+
+
+CephExporterSpec
+
+
+
+
+(Optional)
+Ceph exporter configuration
+
+
MultiClusterServiceSpec
diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml
index 767e08498461..0092ad799fad 100644
--- a/deploy/charts/rook-ceph/templates/resources.yaml
+++ b/deploy/charts/rook-ceph/templates/resources.yaml
@@ -2341,6 +2341,20 @@ spec:
Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus
types must exist or the creation will fail. Default is false.
type: boolean
+ exporter:
+ description: Ceph exporter configuration
+ properties:
+ perfCountersPrioLimit:
+ default: 5
+ description: Only performance counters greater than or equal to this option are fetched
+ format: int64
+ type: integer
+ statsPeriodSeconds:
+ default: 5
+ description: Time to wait before sending requests again to exporter server (seconds)
+ format: int64
+ type: integer
+ type: object
externalMgrEndpoints:
description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint
items:
diff --git a/deploy/examples/cluster.yaml b/deploy/examples/cluster.yaml
index 11860340376e..a01f99f396b0 100644
--- a/deploy/examples/cluster.yaml
+++ b/deploy/examples/cluster.yaml
@@ -85,6 +85,15 @@ spec:
# Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled.
# If true, the prometheus mgr module and Ceph exporter are both disabled. Default is false.
metricsDisabled: false
+ # Ceph exporter metrics config.
+ exporter:
+ # Specifies which performance counters are exported.
+ # Corresponds to --prio-limit Ceph exporter flag
+ # 0 - all counters are exported
+ perfCountersPrioLimit: 5
+ # Time to wait before sending requests again to exporter server (seconds)
+ # Corresponds to --stats-period Ceph exporter flag
+ statsPeriodSeconds: 5
network:
connections:
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml
index 9f7a3d225655..aa8759c17784 100644
--- a/deploy/examples/crds.yaml
+++ b/deploy/examples/crds.yaml
@@ -2339,6 +2339,20 @@ spec:
Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus
types must exist or the creation will fail. Default is false.
type: boolean
+ exporter:
+ description: Ceph exporter configuration
+ properties:
+ perfCountersPrioLimit:
+ default: 5
+ description: Only performance counters greater than or equal to this option are fetched
+ format: int64
+ type: integer
+ statsPeriodSeconds:
+ default: 5
+ description: Time to wait before sending requests again to exporter server (seconds)
+ format: int64
+ type: integer
+ type: object
externalMgrEndpoints:
description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint
items:
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go
index c817ac41ac73..d06c20081f95 100755
--- a/pkg/apis/ceph.rook.io/v1/types.go
+++ b/pkg/apis/ceph.rook.io/v1/types.go
@@ -406,6 +406,20 @@ type MonitoringSpec struct {
// Interval determines prometheus scrape interval
// +optional
Interval *metav1.Duration `json:"interval,omitempty"`
+
+ // Ceph exporter configuration
+ // +optional
+ Exporter *CephExporterSpec `json:"exporter,omitempty"`
+}
+
+type CephExporterSpec struct {
+ // Only performance counters greater than or equal to this option are fetched
+ // +kubebuilder:default=5
+ PerfCountersPrioLimit int64 `json:"perfCountersPrioLimit,omitempty"`
+
+ // Time to wait before sending requests again to exporter server (seconds)
+ // +kubebuilder:default=5
+ StatsPeriodSeconds int64 `json:"statsPeriodSeconds,omitempty"`
}
// ClusterStatus represents the status of a Ceph cluster
diff --git a/pkg/operator/ceph/cluster/nodedaemon/exporter.go b/pkg/operator/ceph/cluster/nodedaemon/exporter.go
index 070bee756301..dec1809eb163 100644
--- a/pkg/operator/ceph/cluster/nodedaemon/exporter.go
+++ b/pkg/operator/ceph/cluster/nodedaemon/exporter.go
@@ -42,8 +42,8 @@ const (
monitoringPath = "/etc/ceph-monitoring/"
serviceMonitorFile = "exporter-service-monitor.yaml"
sockDir = "/run/ceph"
- perfCountersPrioLimit = "5"
- statsPeriod = "5"
+ defaultPrioLimit = "5"
+ defaultStatsPeriod = "5"
DefaultMetricsPort uint16 = 9926
exporterServiceMetricName = "ceph-exporter-http-metrics"
exporterKeyringUsername = "client.ceph-exporter"
@@ -179,10 +179,15 @@ func getCephExporterDaemonContainer(cephCluster cephv1.CephCluster, cephVersion
exporterEnvVar := generateExporterEnvVar()
envVars := append(controller.DaemonEnvVars(&cephCluster.Spec), exporterEnvVar)
+ prioLimit, statsPeriod := defaultPrioLimit, defaultStatsPeriod
+ if cephCluster.Spec.Monitoring.Exporter != nil {
+ prioLimit = strconv.Itoa(int(cephCluster.Spec.Monitoring.Exporter.PerfCountersPrioLimit))
+ statsPeriod = strconv.Itoa(int(cephCluster.Spec.Monitoring.Exporter.StatsPeriodSeconds))
+ }
args := []string{
"--sock-dir", sockDir,
"--port", strconv.Itoa(int(DefaultMetricsPort)),
- "--prio-limit", perfCountersPrioLimit,
+ "--prio-limit", prioLimit,
"--stats-period", statsPeriod,
}
diff --git a/pkg/operator/ceph/cluster/nodedaemon/exporter_test.go b/pkg/operator/ceph/cluster/nodedaemon/exporter_test.go
index 2a6adc90cd22..17ab9132dbff 100644
--- a/pkg/operator/ceph/cluster/nodedaemon/exporter_test.go
+++ b/pkg/operator/ceph/cluster/nodedaemon/exporter_test.go
@@ -121,6 +121,26 @@ func TestCreateOrUpdateCephExporter(t *testing.T) {
assert.Equal(t, tolerations, podSpec.Spec.Tolerations)
assert.Equal(t, true, podSpec.Spec.HostNetwork)
assert.Equal(t, "test-priority-class", podSpec.Spec.PriorityClassName)
+
+ t.Run("exporter config", func(t *testing.T) {
+ cephCluster.Spec.Monitoring.Exporter = &cephv1.CephExporterSpec{
+ PerfCountersPrioLimit: 3,
+ StatsPeriodSeconds: 7,
+ }
+ res, err := r.createOrUpdateCephExporter(node, tolerations, cephCluster, cephVersion)
+ assert.NoError(t, err)
+ assert.Equal(t, controllerutil.OperationResult("updated"), res)
+
+ err = r.client.Get(ctx, types.NamespacedName{Namespace: "rook-ceph", Name: name}, &deploy)
+ assert.NoError(t, err)
+
+ podSpec := deploy.Spec.Template
+ args := podSpec.Spec.Containers[0].Args
+ assert.Equal(t, "--prio-limit", args[4])
+ assert.Equal(t, "3", args[5])
+ assert.Equal(t, "--stats-period", args[6])
+ assert.Equal(t, "7", args[7])
+ })
}
func TestCephExporterBindAddress(t *testing.T) {
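For reference, the new exporter settings are surfaced on the CephCluster CR as a `monitoring.exporter` block. A minimal sketch, assuming only the field names and defaults visible in the CRD properties and json tags above (the values are taken from the unit test):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  monitoring:
    exporter:
      # Only performance counters with priority >= this limit are fetched
      perfCountersPrioLimit: 3
      # Seconds to wait before polling the exporter again
      statsPeriodSeconds: 7
```

When `monitoring.exporter` is omitted, the container args fall back to the `defaultPrioLimit` and `defaultStatsPeriod` constants, i.e. `--prio-limit 5 --stats-period 5`.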
From 18d32ba3816adf1555ed04bd215c692d15f0c933 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:46:20 +0000
Subject: [PATCH 18/40] build(deps): bump the github-dependencies group with 4
updates
Bumps the github-dependencies group with 4 updates: [github.com/hashicorp/vault/api](https://github.com/hashicorp/vault), [github.com/k8snetworkplumbingwg/network-attachment-definition-client](https://github.com/k8snetworkplumbingwg/network-attachment-definition-client), [github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring](https://github.com/prometheus-operator/prometheus-operator) and [github.com/prometheus-operator/prometheus-operator/pkg/client](https://github.com/prometheus-operator/prometheus-operator).
Updates `github.com/hashicorp/vault/api` from 1.14.0 to 1.15.0
- [Release notes](https://github.com/hashicorp/vault/releases)
- [Changelog](https://github.com/hashicorp/vault/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/vault/compare/v1.14.0...v1.15.0)
Updates `github.com/k8snetworkplumbingwg/network-attachment-definition-client` from 1.7.1 to 1.7.3
- [Release notes](https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/releases)
- [Commits](https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/compare/v1.7.1...v1.7.3)
Updates `github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring` from 0.76.1 to 0.76.2
- [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases)
- [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.76.1...v0.76.2)
Updates `github.com/prometheus-operator/prometheus-operator/pkg/client` from 0.76.1 to 0.76.2
- [Release notes](https://github.com/prometheus-operator/prometheus-operator/releases)
- [Changelog](https://github.com/prometheus-operator/prometheus-operator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus-operator/prometheus-operator/compare/v0.76.1...v0.76.2)
---
updated-dependencies:
- dependency-name: github.com/hashicorp/vault/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: github-dependencies
- dependency-name: github.com/k8snetworkplumbingwg/network-attachment-definition-client
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: github-dependencies
- dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: github-dependencies
- dependency-name: github.com/prometheus-operator/prometheus-operator/pkg/client
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: github-dependencies
...
Signed-off-by: dependabot[bot]
---
go.mod | 10 +++++-----
go.sum | 19 ++++++++++---------
pkg/apis/go.mod | 6 +++---
pkg/apis/go.sum | 11 ++++++-----
4 files changed, 24 insertions(+), 22 deletions(-)
diff --git a/go.mod b/go.mod
index 789a8464d54b..79f415c05d60 100644
--- a/go.mod
+++ b/go.mod
@@ -26,13 +26,13 @@ require (
github.com/go-ini/ini v1.67.0
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
- github.com/hashicorp/vault/api v1.14.0
- github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1
+ github.com/hashicorp/vault/api v1.15.0
+ github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.3
github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1
github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c
github.com/pkg/errors v0.9.1
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1
- github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2
+ github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2
github.com/rook/rook/pkg/apis v0.0.0-20231204200402-5287527732f7
github.com/sethvargo/go-password v0.3.1
github.com/spf13/cobra v1.8.1
@@ -66,6 +66,7 @@ require (
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
@@ -82,7 +83,6 @@ require (
github.com/ansel1/merry v1.8.0 // indirect
github.com/ansel1/merry/v2 v2.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/ceph/ceph-csi/api v0.0.0-20231227104434-06f9a98b7a83
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containernetworking/cni v1.2.0-rc1 // indirect
diff --git a/go.sum b/go.sum
index ccec62e8dcf6..76c1460b89f6 100644
--- a/go.sum
+++ b/go.sum
@@ -161,8 +161,9 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c h1:JOhwt7+iM18pm9s9zAhAKGRJm615AdIaKklbUd7Z8So=
github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c/go.mod h1:odEUoarG26wXBCC2l4O4nMWhAz6VTKr2FRkv9yELgi8=
@@ -555,8 +556,8 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
-github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU=
-github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk=
+github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA=
+github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8=
github.com/hashicorp/vault/api/auth/approle v0.5.0/go.mod h1:CHOQIA1AZACfjTzHggmyfiOZ+xCSKNRFqe48FTCzH0k=
github.com/hashicorp/vault/api/auth/approle v0.6.0 h1:ELfFFQlTM/e97WJKu1HvNFa7lQ3tlTwwzrR1NJE1V7Y=
github.com/hashicorp/vault/api/auth/approle v0.6.0/go.mod h1:CCoIl1xBC3lAWpd1HV+0ovk76Z8b8Mdepyk21h3pGk0=
@@ -597,8 +598,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
-github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1 h1:n4FpoJ6aGDx8ULfya/C4ycrMDuPZlf7AtPyrT4+rIP4=
-github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.3 h1:W44yEuOvwcBErGzSjjVGEbmHh8oRGLmxDSC2yVJQ2aM=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.3/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -778,11 +779,11 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.46.0/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1 h1:QU2cs0xxKYvF1JfibP/8vs+pFy6OvIpqNR2lYC4jYNU=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2 h1:BpGDC87A2SaxbKgONsFLEX3kRcRJee2aLQbjXsuz0hA=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.2/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
github.com/prometheus-operator/prometheus-operator/pkg/client v0.46.0/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1 h1:wMPmeRdflJFu14F0YaIiOIYGkBDDKipkeWW0q53d2+s=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.1/go.mod h1:7vND+IkdMpZyfSyRs6P5/uXz6BlFDaOj8olErODi8I0=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2 h1:yncs8NglhE3hB+viNsabCAF9TBBDOBljHUyxHC5fSGY=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.76.2/go.mod h1:AfbzyEUFxJmSoTiMcgNHHjDKcorBVd9TIwx0viURgEw=
github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod
index 27790be77987..79f7c259da9e 100644
--- a/pkg/apis/go.mod
+++ b/pkg/apis/go.mod
@@ -15,8 +15,8 @@ replace (
)
require (
- github.com/hashicorp/vault/api v1.14.0
- github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1
+ github.com/hashicorp/vault/api v1.15.0
+ github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.3
github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1
github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c
github.com/pkg/errors v0.9.1
@@ -27,6 +27,7 @@ require (
require (
github.com/Masterminds/semver/v3 v3.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -39,7 +40,6 @@ require (
)
require (
- github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/containernetworking/cni v1.2.0-rc1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum
index 795621de221e..50cf57cc5b93 100644
--- a/pkg/apis/go.sum
+++ b/pkg/apis/go.sum
@@ -125,8 +125,9 @@ github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -470,8 +471,8 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
-github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU=
-github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk=
+github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA=
+github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8=
github.com/hashicorp/vault/api/auth/approle v0.5.0/go.mod h1:CHOQIA1AZACfjTzHggmyfiOZ+xCSKNRFqe48FTCzH0k=
github.com/hashicorp/vault/api/auth/approle v0.6.0 h1:ELfFFQlTM/e97WJKu1HvNFa7lQ3tlTwwzrR1NJE1V7Y=
github.com/hashicorp/vault/api/auth/approle v0.6.0/go.mod h1:CCoIl1xBC3lAWpd1HV+0ovk76Z8b8Mdepyk21h3pGk0=
@@ -502,8 +503,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1 h1:n4FpoJ6aGDx8ULfya/C4ycrMDuPZlf7AtPyrT4+rIP4=
-github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.1/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.3 h1:W44yEuOvwcBErGzSjjVGEbmHh8oRGLmxDSC2yVJQ2aM=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.3/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
From c8a64c49757aec1ddcad3aee1aa6109079bd1a47 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:45:51 +0000
Subject: [PATCH 19/40] build(deps): bump the k8s-dependencies group with 6
updates
Bumps the k8s-dependencies group with 6 updates:
| Package | From | To |
| --- | --- | --- |
| [k8s.io/api](https://github.com/kubernetes/api) | `0.31.0` | `0.31.1` |
| [k8s.io/apiextensions-apiserver](https://github.com/kubernetes/apiextensions-apiserver) | `0.31.0` | `0.31.1` |
| [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) | `0.31.0` | `0.31.1` |
| [k8s.io/cli-runtime](https://github.com/kubernetes/cli-runtime) | `0.31.0` | `0.31.1` |
| [k8s.io/client-go](https://github.com/kubernetes/client-go) | `0.31.0` | `0.31.1` |
| [k8s.io/cloud-provider](https://github.com/kubernetes/cloud-provider) | `0.31.0` | `0.31.1` |
Updates `k8s.io/api` from 0.31.0 to 0.31.1
- [Commits](https://github.com/kubernetes/api/compare/v0.31.0...v0.31.1)
Updates `k8s.io/apiextensions-apiserver` from 0.31.0 to 0.31.1
- [Release notes](https://github.com/kubernetes/apiextensions-apiserver/releases)
- [Commits](https://github.com/kubernetes/apiextensions-apiserver/compare/v0.31.0...v0.31.1)
Updates `k8s.io/apimachinery` from 0.31.0 to 0.31.1
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.31.0...v0.31.1)
Updates `k8s.io/cli-runtime` from 0.31.0 to 0.31.1
- [Commits](https://github.com/kubernetes/cli-runtime/compare/v0.31.0...v0.31.1)
Updates `k8s.io/client-go` from 0.31.0 to 0.31.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.31.0...v0.31.1)
Updates `k8s.io/cloud-provider` from 0.31.0 to 0.31.1
- [Commits](https://github.com/kubernetes/cloud-provider/compare/v0.31.0...v0.31.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: k8s-dependencies
- dependency-name: k8s.io/apiextensions-apiserver
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: k8s-dependencies
- dependency-name: k8s.io/apimachinery
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: k8s-dependencies
- dependency-name: k8s.io/cli-runtime
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: k8s-dependencies
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: k8s-dependencies
- dependency-name: k8s.io/cloud-provider
dependency-type: direct:production
update-type: version-update:semver-patch
dependency-group: k8s-dependencies
...
Signed-off-by: dependabot[bot]
---
go.mod | 12 ++++++------
go.sum | 24 ++++++++++++------------
pkg/apis/go.mod | 6 +++---
pkg/apis/go.sum | 12 ++++++------
4 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/go.mod b/go.mod
index 789a8464d54b..85d836fed9d5 100644
--- a/go.mod
+++ b/go.mod
@@ -45,12 +45,12 @@ require (
golang.org/x/sync v0.8.0
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.31.0
- k8s.io/apiextensions-apiserver v0.31.0
- k8s.io/apimachinery v0.31.0
- k8s.io/cli-runtime v0.31.0
- k8s.io/client-go v0.31.0
- k8s.io/cloud-provider v0.31.0
+ k8s.io/api v0.31.1
+ k8s.io/apiextensions-apiserver v0.31.1
+ k8s.io/apimachinery v0.31.1
+ k8s.io/cli-runtime v0.31.1
+ k8s.io/client-go v0.31.1
+ k8s.io/cloud-provider v0.31.1
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.19.0
sigs.k8s.io/mcs-api v0.1.0
diff --git a/go.sum b/go.sum
index ccec62e8dcf6..75f7f6a2888b 100644
--- a/go.sum
+++ b/go.sum
@@ -1604,15 +1604,15 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
-k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
-k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
+k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
+k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
k8s.io/apiextensions-apiserver v0.18.4/go.mod h1:NYeyeYq4SIpFlPxSAB6jHPIdvu3hL0pc36wuRChybio=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
-k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
-k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
+k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40=
+k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ=
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
@@ -1624,14 +1624,14 @@ k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
-k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
+k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/cli-runtime v0.31.0 h1:V2Q1gj1u3/WfhD475HBQrIYsoryg/LrhhK4RwpN+DhA=
-k8s.io/cli-runtime v0.31.0/go.mod h1:vg3H94wsubuvWfSmStDbekvbla5vFGC+zLWqcf+bGDw=
+k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk=
+k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g=
@@ -1640,10 +1640,10 @@ k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
-k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
-k8s.io/cloud-provider v0.31.0 h1:qNOs78I2/7zQmyStfDtY2M7EdilUl9fCSYMcqBju/tA=
-k8s.io/cloud-provider v0.31.0/go.mod h1:QgUPqLoL6aXhLlrNg1U4IrJk/PvvxgeOnT2ixkgnqT0=
+k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
+k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
+k8s.io/cloud-provider v0.31.1 h1:40b6AgDizwm5eWratZbqubTHMob25VWr6NX2Ei5TwZA=
+k8s.io/cloud-provider v0.31.1/go.mod h1:xAdkE7fdZdu9rKLuOZUMBfagu7bM+bas3iPux/2nLGg=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.18.4/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod
index 27790be77987..0748da9216ee 100644
--- a/pkg/apis/go.mod
+++ b/pkg/apis/go.mod
@@ -21,8 +21,8 @@ require (
github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.9.0
- k8s.io/api v0.31.0
- k8s.io/apimachinery v0.31.0
+ k8s.io/api v0.31.1
+ k8s.io/apimachinery v0.31.1
)
require (
@@ -33,7 +33,7 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/client-go v0.31.0 // indirect
+ k8s.io/client-go v0.31.1 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum
index 795621de221e..288ff694accf 100644
--- a/pkg/apis/go.sum
+++ b/pkg/apis/go.sum
@@ -1409,8 +1409,8 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
-k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
-k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
+k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
+k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
@@ -1423,8 +1423,8 @@ k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
-k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
+k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
+k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
@@ -1433,8 +1433,8 @@ k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
-k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
+k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
+k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
From 1e3d08f11409dcf3e1f0119fe9388ba840bb98d6 Mon Sep 17 00:00:00 2001
From: Niraj Yadav
Date: Thu, 12 Sep 2024 18:25:09 +0530
Subject: [PATCH 20/40] docs: add documentation for volumegroupsnapshot
This PR adds the sample YAMLs and documentation
for the VolumeGroupSnapshot feature.
Signed-off-by: Niraj Yadav
---
.../Storage-Configuration/Ceph-CSI/.pages | 1 +
.../Ceph-CSI/ceph-csi-snapshot.md | 5 -
.../ceph-csi-volume-group-snapshot.md | 109 ++++++++++++++++++
deploy/examples/csi/cephfs/groupsnapshot.yaml | 13 +++
.../csi/cephfs/groupsnapshotclass.yaml | 15 +++
deploy/examples/csi/cephfs/pvc.yaml | 2 +
6 files changed, 140 insertions(+), 5 deletions(-)
create mode 100644 Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-volume-group-snapshot.md
create mode 100644 deploy/examples/csi/cephfs/groupsnapshot.yaml
create mode 100644 deploy/examples/csi/cephfs/groupsnapshotclass.yaml
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/.pages b/Documentation/Storage-Configuration/Ceph-CSI/.pages
index a117ff7ddd87..47db28e1cd34 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/.pages
+++ b/Documentation/Storage-Configuration/Ceph-CSI/.pages
@@ -1,6 +1,7 @@
nav:
- ceph-csi-drivers.md
- ceph-csi-snapshot.md
+ - ceph-csi-volume-group-snapshot.md
- ceph-csi-volume-clone.md
- custom-images.md
- ...
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md
index 1a8d888d14ce..c436b7198821 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md
@@ -178,8 +178,3 @@ kubectl delete -f deploy/examples/csi/cephfs/pvc-restore.yaml
kubectl delete -f deploy/examples/csi/cephfs/snapshot.yaml
kubectl delete -f deploy/examples/csi/cephfs/snapshotclass.yaml
```
-
-## Limitations
-
-- There is a limit of 400 snapshots per cephFS filesystem.
-- The PVC cannot be deleted if it has snapshots. make sure all the snapshots on the PVC are deleted before you delete the PVC.
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-volume-group-snapshot.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-volume-group-snapshot.md
new file mode 100644
index 000000000000..33d58b807fa2
--- /dev/null
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-volume-group-snapshot.md
@@ -0,0 +1,109 @@
+---
+title: Volume Group Snapshots
+---
+
+Ceph provides the ability to create crash-consistent snapshots of multiple volumes.
+A group snapshot represents “copies” from multiple volumes that are taken at the same point in time.
+A group snapshot can be used either to rehydrate new volumes (pre-populated with the snapshot data)
+or to restore existing volumes to a previous state (represented by the snapshots).
+
+
+## Prerequisites
+
+- Install the [snapshot controller, volume group snapshot and snapshot CRDs](https://github.com/kubernetes-csi/external-snapshotter/tree/master#usage).
+Refer to the VolumeGroupSnapshot documentation
+[here](https://github.com/kubernetes-csi/external-snapshotter/tree/master#volume-group-snapshot-support) for more details.
+
+- A `VolumeGroupSnapshotClass` is needed for the volume group snapshot to work. The purpose of a `VolumeGroupSnapshotClass` is
+defined in [the Kubernetes
+documentation](https://kubernetes.io/blog/2023/05/08/kubernetes-1-27-volume-group-snapshot-alpha/).
+In short, as the documentation describes it:
+
+!!! info
+    Created by cluster administrators to describe how volume group snapshots
+    should be created, including the driver information, the deletion policy, etc.
+
+## Volume Group Snapshots
+
+### CephFS VolumeGroupSnapshotClass
+
+In [VolumeGroupSnapshotClass](https://github.com/rook/rook/tree/master/deploy/examples/csi/cephfs/groupsnapshotclass.yaml),
+the `csi.storage.k8s.io/group-snapshotter-secret-name` parameter should reference the
+name of the secret created for the cephfs-plugin.
+
+In the `VolumeGroupSnapshotClass`, update the value of the `clusterID` field to match the namespace
+that Rook is running in. When Ceph CSI is deployed by Rook, the operator will automatically
+maintain a configmap whose contents will match this key. By default this is
+"rook-ceph".
+
+```console
+kubectl create -f deploy/examples/csi/cephfs/groupsnapshotclass.yaml
+```
+
+### CephFS VolumeGroupSnapshot
+
+In [VolumeGroupSnapshot](https://github.com/rook/rook/tree/master/deploy/examples/csi/cephfs/groupsnapshot.yaml),
+`volumeGroupSnapshotClassName` should be the name of the `VolumeGroupSnapshotClass`
+previously created. The labels inside `matchLabels` must be present on the
+PVCs that were already created by the CephFS CSI driver.
+
+```console
+kubectl create -f deploy/examples/csi/cephfs/groupsnapshot.yaml
+```
+
+### Verify CephFS GroupSnapshot Creation
+
+```console
+$ kubectl get volumegroupsnapshotclass
+NAME DRIVER DELETIONPOLICY AGE
+csi-cephfsplugin-groupsnapclass rook-ceph.cephfs.csi.ceph.com Delete 21m
+```
+
+```console
+$ kubectl get volumegroupsnapshot
+NAME READYTOUSE VOLUMEGROUPSNAPSHOTCLASS VOLUMEGROUPSNAPSHOTCONTENT CREATIONTIME AGE
+cephfs-groupsnapshot true csi-cephfsplugin-groupsnapclass groupsnapcontent-d13f4d95-8822-4729-9586-4f222a3f788e 5m37s 5m39s
+```
+
+The snapshot will be ready to restore to a new PVC when the `READYTOUSE` field of the
+`volumegroupsnapshot` is set to true.
+
+### Restore the CephFS volume group snapshot to a new PVC
+
+First, find the names of the snapshots created by the `VolumeGroupSnapshot` by running:
+
+```console
+$ kubectl get volumegroupsnapshot/cephfs-groupsnapshot -o=jsonpath='{range .status.pvcVolumeSnapshotRefList[*]}PVC: {.persistentVolumeClaimRef.name}, Snapshot: {.volumeSnapshotRef.name}{"\n"}{end}'
+PVC: cephfs-pvc, Snapshot: snapshot-9d21b143904c10f49ddc92664a7e8fe93c23387d0a88549c14337484ebaf1011-2024-09-12-3.49.13
+```
+
+The output lists each PVC name followed by the name of its snapshot.
+
+In
+[pvc-restore](https://github.com/rook/rook/tree/master/deploy/examples/csi/cephfs/pvc-restore.yaml),
+the `dataSource` name should be one of the snapshot names we just
+found, and the `dataSource` kind should be `VolumeSnapshot`.
+
+Create a new PVC from the snapshot:
+
+```console
+kubectl create -f deploy/examples/csi/cephfs/pvc-restore.yaml
+```
+
+### Verify CephFS Restore PVC Creation
+
+```console
+$ kubectl get pvc
+cephfs-pvc Bound pvc-9ae60bf9-4931-4f9a-9de1-7f45f31fe4da 1Gi RWO rook-cephfs 171m
+cephfs-pvc-restore Bound pvc-b4b73cbb-5061-48c7-9ac8-e1202508cf97 1Gi RWO rook-cephfs 46s
+```
+
+## CephFS Volume Group Snapshot Resource Cleanup
+
+To clean up the resources created by this example, run the following:
+
+```console
+kubectl delete -f deploy/examples/csi/cephfs/pvc-restore.yaml
+kubectl delete -f deploy/examples/csi/cephfs/groupsnapshot.yaml
+kubectl delete -f deploy/examples/csi/cephfs/groupsnapshotclass.yaml
+```
diff --git a/deploy/examples/csi/cephfs/groupsnapshot.yaml b/deploy/examples/csi/cephfs/groupsnapshot.yaml
new file mode 100644
index 000000000000..98a8a6d46cfe
--- /dev/null
+++ b/deploy/examples/csi/cephfs/groupsnapshot.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
+kind: VolumeGroupSnapshot
+metadata:
+ name: cephfs-groupsnapshot
+spec:
+ source:
+ selector:
+ matchLabels:
+ # The PVCs require this label for them to be
+ # included in the VolumeGroupSnapshot
+ group: snapshot-test
+ volumeGroupSnapshotClassName: csi-cephfsplugin-groupsnapclass
diff --git a/deploy/examples/csi/cephfs/groupsnapshotclass.yaml b/deploy/examples/csi/cephfs/groupsnapshotclass.yaml
new file mode 100644
index 000000000000..fe8d2e775c24
--- /dev/null
+++ b/deploy/examples/csi/cephfs/groupsnapshotclass.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1alpha1
+kind: VolumeGroupSnapshotClass
+metadata:
+ name: csi-cephfsplugin-groupsnapclass
+driver: rook-ceph.cephfs.csi.ceph.com # csi-provisioner-name
+parameters:
+ # Specify a string that identifies your cluster. Ceph CSI supports any
+ # unique string. When Ceph CSI is deployed by Rook use the Rook namespace,
+ # for example "rook-ceph".
+ clusterID: rook-ceph # namespace: cluster
+ fsName: myfs
+ csi.storage.k8s.io/group-snapshotter-secret-name: rook-csi-cephfs-provisioner
+ csi.storage.k8s.io/group-snapshotter-secret-namespace: rook-ceph
+deletionPolicy: Delete
diff --git a/deploy/examples/csi/cephfs/pvc.yaml b/deploy/examples/csi/cephfs/pvc.yaml
index 0f6addb69fae..cf3057de3115 100644
--- a/deploy/examples/csi/cephfs/pvc.yaml
+++ b/deploy/examples/csi/cephfs/pvc.yaml
@@ -3,6 +3,8 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: cephfs-pvc
+ labels:
+ group: snapshot-test
spec:
accessModes:
- ReadWriteOnce
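The documentation above references a `pvc-restore.yaml` that this patch does not include. A rough sketch of such a manifest, assuming the snapshot name from the example output and the `rook-cephfs` storage class shown earlier (the actual file in `deploy/examples` may differ):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc-restore
spec:
  storageClassName: rook-cephfs
  # Restore from one of the per-PVC snapshots created by the VolumeGroupSnapshot
  dataSource:
    name: snapshot-9d21b143904c10f49ddc92664a7e8fe93c23387d0a88549c14337484ebaf1011-2024-09-12-3.49.13
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```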
From 32c5d99ef565e9aaf88f5f1f36c4aeca083b8347 Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Tue, 17 Sep 2024 15:03:21 +0530
Subject: [PATCH 21/40] external: mds caps to healthchecker/cephfs users
For cephfs fencing in external mode, we run the command `ceph tell mds ***`,
which requires the user to include the cap `mds allow *` in its permissions.
So, we need to add this cap to the healthchecker and cephfs provisioner users.
Signed-off-by: subhamkrai
---
.../create-external-cluster-resources.py | 34 ++++++++++---------
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/deploy/examples/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py
index 45b972d1114c..268176c4f607 100644
--- a/deploy/examples/create-external-cluster-resources.py
+++ b/deploy/examples/create-external-cluster-resources.py
@@ -96,8 +96,8 @@ def _init_cmd_output_map(self):
"""{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}"""
)
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
- ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]"""
+ """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index", "mds", "allow *"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
+ ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index","mds":"allow *"}}]"""
self.cmd_output_map[
"""{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}"""
] = """[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
@@ -108,20 +108,20 @@ def _init_cmd_output_map(self):
"""{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}"""
] = """[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs *=*"}}]"""
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
- ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
+ """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*", "mds", "allow *"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
+ ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*","mds":"allow *"}}]"""
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}"""
- ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
+ """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*", "mds", "allow *"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}"""
+ ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*", "mds":"allow *"}}]"""
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}"""
- ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]"""
+ """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs", "mds", "allow *"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}"""
+ ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs","mds":"allow *"}}]"""
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
- ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
+ """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index", "mds", "allow *"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
+ ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index","mds":"allow *"}}]"""
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}"""
- ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSRKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
+ """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index", "mds", "allow *"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}"""
+ ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSRKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index","mds":"allow *"}}]"""
self.cmd_output_map["""{"format": "json", "prefix": "mgr services"}"""] = (
"""{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
)
@@ -130,7 +130,7 @@ def _init_cmd_output_map(self):
] = """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
self.cmd_output_map[
"""{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}"""
- ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
+ ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index","mds":"allow *"}}]"""
self.cmd_output_map[
"""{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}"""
] = """[]"""
@@ -151,10 +151,10 @@ def _init_cmd_output_map(self):
] = """[]"""
self.cmd_output_map[
"""{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}"""
- ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
+ ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*","mds":"allow *"}}]"""
self.cmd_output_map[
- """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}"""
- ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
+ """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*", "mds", "allow *"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}"""
+ ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*","mds":"allow *"}}]"""
self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str
def shutdown(self):
@@ -868,6 +868,7 @@ def get_cephfs_provisioner_caps_and_entity(self):
"mon": "allow r, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "allow rw tag cephfs metadata=*",
+ "mds": "allow *",
}
if self._arg_parser.restricted_auth_permission:
k8s_cluster_name = self._arg_parser.k8s_cluster_name
@@ -1017,6 +1018,7 @@ def get_defaultUser_caps_and_entity(self):
"mon": "allow r, allow command quorum_status, allow command version",
"mgr": "allow command config",
"osd": f"profile rbd-read-only, allow rwx pool={self._arg_parser.rgw_pool_prefix}.rgw.meta, allow r pool=.rgw.root, allow rw pool={self._arg_parser.rgw_pool_prefix}.rgw.control, allow rx pool={self._arg_parser.rgw_pool_prefix}.rgw.log, allow x pool={self._arg_parser.rgw_pool_prefix}.rgw.buckets.index",
+ "mds": "allow *",
}
return caps, entity
From fa2941d098f98fafc9529ee6c6f989ad29fce27e Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Tue, 17 Sep 2024 14:24:40 -0600
Subject: [PATCH 22/40] core: enable annotations on crash collector
The crash collector daemon was missing the ability to
add custom annotations via the CephCluster CR, as already
supported for all the other daemons.
Signed-off-by: Travis Nielsen
---
pkg/apis/ceph.rook.io/v1/annotations.go | 5 +++++
pkg/apis/ceph.rook.io/v1/annotations_test.go | 11 ++++++++---
pkg/operator/ceph/cluster/nodedaemon/crash.go | 1 +
3 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/pkg/apis/ceph.rook.io/v1/annotations.go b/pkg/apis/ceph.rook.io/v1/annotations.go
index 9610420be28c..efd40793f01f 100644
--- a/pkg/apis/ceph.rook.io/v1/annotations.go
+++ b/pkg/apis/ceph.rook.io/v1/annotations.go
@@ -77,6 +77,11 @@ func GetCmdReporterAnnotations(a AnnotationsSpec) Annotations {
return mergeAllAnnotationsWithKey(a, KeyCmdReporter)
}
+// GetCrashCollectorAnnotations returns the Annotations for the crash collector
+func GetCrashCollectorAnnotations(a AnnotationsSpec) Annotations {
+ return mergeAllAnnotationsWithKey(a, KeyCrashCollector)
+}
+
func GetClusterMetadataAnnotations(a AnnotationsSpec) Annotations {
return a[KeyClusterMetadata]
}
diff --git a/pkg/apis/ceph.rook.io/v1/annotations_test.go b/pkg/apis/ceph.rook.io/v1/annotations_test.go
index b832690c3567..c3137d8ed549 100644
--- a/pkg/apis/ceph.rook.io/v1/annotations_test.go
+++ b/pkg/apis/ceph.rook.io/v1/annotations_test.go
@@ -58,9 +58,10 @@ func TestCephAnnotationsMerge(t *testing.T) {
// Merge with "all"
testAnnotations = AnnotationsSpec{
- "all": {"allkey1": "allval1", "allkey2": "allval2"},
- "mgr": {"mgrkey": "mgrval"},
- "cmdreporter": {"myversions": "detect"},
+ "all": {"allkey1": "allval1", "allkey2": "allval2"},
+ "mgr": {"mgrkey": "mgrval"},
+ "cmdreporter": {"myversions": "detect"},
+ "crashcollector": {"crash": "crashval"},
}
a = GetMonAnnotations(testAnnotations)
assert.Equal(t, "allval1", a["allkey1"])
@@ -75,6 +76,10 @@ func TestCephAnnotationsMerge(t *testing.T) {
assert.Equal(t, "detect", b["myversions"])
assert.Equal(t, "allval1", b["allkey1"])
assert.Equal(t, "allval2", b["allkey2"])
+ c := GetCrashCollectorAnnotations(testAnnotations)
+ assert.Equal(t, "crashval", c["crash"])
+ assert.Equal(t, "allval1", c["allkey1"])
+ assert.Equal(t, "allval2", c["allkey2"])
}
func TestAnnotationsSpec(t *testing.T) {
diff --git a/pkg/operator/ceph/cluster/nodedaemon/crash.go b/pkg/operator/ceph/cluster/nodedaemon/crash.go
index 97abb34a3f9d..87ed94fb7ee8 100644
--- a/pkg/operator/ceph/cluster/nodedaemon/crash.go
+++ b/pkg/operator/ceph/cluster/nodedaemon/crash.go
@@ -124,6 +124,7 @@ func (r *ReconcileNode) createOrUpdateCephCrash(node corev1.Node, tolerations []
ServiceAccountName: k8sutil.DefaultServiceAccount,
},
}
+ cephv1.GetCrashCollectorAnnotations(cephCluster.Spec.Annotations).ApplyToObjectMeta(&deploy.Spec.Template.ObjectMeta)
return nil
}
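With this change, crash collector annotations follow the same CephCluster CR convention as the other daemons: keys under `all` are merged into every daemon, and keys under `crashcollector` apply only to the crash collector pods. A minimal sketch, with placeholder annotation keys:

```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  annotations:
    # Merged into all daemons, including the crash collector
    all:
      example.com/team: storage
    # Applied only to the crash collector pods
    crashcollector:
      example.com/log-scrape: "enabled"
```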
From afad40e404ae2af7ab0419fc6923a31fe59a2383 Mon Sep 17 00:00:00 2001
From: Praveen M
Date: Wed, 18 Sep 2024 11:40:07 +0530
Subject: [PATCH 23/40] csi: update csi-addons to v0.10.0
The csi-addons v0.10.0 release is now available.
Ref: https://github.com/csi-addons/kubernetes-csi-addons/releases/tag/v0.10.0
Signed-off-by: Praveen M
---
Documentation/Helm-Charts/operator-chart.md | 2 +-
.../Ceph-CSI/ceph-csi-drivers.md | 32 +++++++++----------
.../Ceph-CSI/custom-images.md | 2 +-
deploy/charts/rook-ceph/values.yaml | 2 +-
deploy/examples/images.txt | 2 +-
deploy/examples/operator-openshift.yaml | 2 +-
deploy/examples/operator.yaml | 2 +-
go.mod | 6 ++--
go.sum | 16 +++++-----
pkg/apis/go.mod | 6 ++--
pkg/apis/go.sum | 14 ++++----
pkg/operator/ceph/csi/spec.go | 2 +-
tests/scripts/csiaddons.sh | 2 +-
13 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index 7ff471e7df16..ebb42204fb1f 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -67,7 +67,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `csi.clusterName` | Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster | `nil` |
| `csi.csiAddons.enabled` | Enable CSIAddons | `false` |
| `csi.csiAddons.repository` | CSIAddons sidecar image repository | `"quay.io/csiaddons/k8s-sidecar"` |
-| `csi.csiAddons.tag` | CSIAddons sidecar image tag | `"v0.9.1"` |
+| `csi.csiAddons.tag` | CSIAddons sidecar image tag | `"v0.10.0"` |
| `csi.csiAddonsPort` | CSI Addons server port | `9070` |
| `csi.csiCephFSPluginResource` | CEPH CSI CephFS plugin resource requirement list | see values.yaml |
| `csi.csiCephFSPluginVolume` | The volume of the CephCSI CephFS plugin DaemonSet | `nil` |
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
index dc5df80b8847..d06bb58ae9d4 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
@@ -166,9 +166,9 @@ that the controller inspects and forwards to one or more CSI-Addons sidecars for
Deploy the controller by running the following commands:
```console
-kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.9.1/crds.yaml
-kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.9.1/rbac.yaml
-kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.9.1/setup-controller.yaml
+kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.10.0/crds.yaml
+kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.10.0/rbac.yaml
+kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.10.0/setup-controller.yaml
```
This creates the required CRDs and configures permissions.
@@ -196,22 +196,22 @@ Execute the following to enable the CSI-Addons sidecars:
CSI-Addons supports the following operations:
* Reclaim Space
- * [Creating a ReclaimSpaceJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#reclaimspacejob)
- * [Creating a ReclaimSpaceCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#reclaimspacecronjob)
- * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-perstentvolumeclaims)
- * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-namespace)
- * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/reclaimspace.md#annotating-storageclass)
+ * [Creating a ReclaimSpaceJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/reclaimspace.md#reclaimspacejob)
+ * [Creating a ReclaimSpaceCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/reclaimspace.md#reclaimspacecronjob)
+ * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/reclaimspace.md#annotating-perstentvolumeclaims)
+ * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/reclaimspace.md#annotating-namespace)
+ * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/reclaimspace.md#annotating-storageclass)
* Network Fencing
- * [Creating a NetworkFence](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/networkfence.md)
+ * [Creating a NetworkFence](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/networkfence.md)
* Volume Replication
- * [Creating VolumeReplicationClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/volumereplicationclass.md)
- * [Creating VolumeReplication CR](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/volumereplication.md)
+ * [Creating VolumeReplicationClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/volumereplicationclass.md)
+ * [Creating VolumeReplication CR](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/volumereplication.md)
* Key Rotation Job for PV encryption
- * [Creating EncryptionKeyRotationJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#encryptionkeyrotationjob)
- * [Creating EncryptionKeyRotationCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#encryptionkeyrotationcronjob)
- * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-persistentvolumeclaims)
- * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-namespace)
- * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.1/docs/encryptionkeyrotation.md#annotating-storageclass)
+ * [Creating EncryptionKeyRotationJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/encryptionkeyrotation.md#encryptionkeyrotationjob)
+ * [Creating EncryptionKeyRotationCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/encryptionkeyrotation.md#encryptionkeyrotationcronjob)
+ * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/encryptionkeyrotation.md#annotating-persistentvolumeclaims)
+ * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/encryptionkeyrotation.md#annotating-namespace)
+ * [Annotating StorageClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.10.0/docs/encryptionkeyrotation.md#annotating-storageclass)
## Enable RBD and CephFS Encryption Support
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md b/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
index 8629bbcc1b55..86beab48d9a1 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
@@ -24,7 +24,7 @@ ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1"
-ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.1"
+ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.10.0"
```
### **Use private repository**
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index 387712a37e2f..4cf7c298d6a4 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -537,7 +537,7 @@ csi:
# -- CSIAddons sidecar image repository
repository: quay.io/csiaddons/k8s-sidecar
# -- CSIAddons sidecar image tag
- tag: v0.9.1
+ tag: v0.10.0
nfs:
# -- Enable the nfs csi driver
diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt
index faab1198c12c..b84f8adbdd97 100644
--- a/deploy/examples/images.txt
+++ b/deploy/examples/images.txt
@@ -3,7 +3,7 @@
quay.io/ceph/ceph:v18.2.4
quay.io/ceph/cosi:v0.1.2
quay.io/cephcsi/cephcsi:v3.12.2
- quay.io/csiaddons/k8s-sidecar:v0.9.1
+ quay.io/csiaddons/k8s-sidecar:v0.10.0
registry.k8s.io/sig-storage/csi-attacher:v4.6.1
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1
registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 396f022dd420..82ed8f14f141 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -558,7 +558,7 @@ data:
CSI_ENABLE_CSIADDONS: "false"
# Enable watch for faster recovery from rbd rwo node loss
ROOK_WATCH_FOR_NODE_FAILURE: "true"
- # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.1"
+ # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.10.0"
# The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
CSI_GRPC_TIMEOUT_SECONDS: "150"
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 1d60f5638559..e0faa4d2a6b4 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -508,7 +508,7 @@ data:
CSI_ENABLE_CSIADDONS: "false"
# Enable watch for faster recovery from rbd rwo node loss
ROOK_WATCH_FOR_NODE_FAILURE: "true"
- # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.1"
+ # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.10.0"
# The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
CSI_GRPC_TIMEOUT_SECONDS: "150"
diff --git a/go.mod b/go.mod
index bc4e4229b027..add877c686ef 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,8 @@
module github.com/rook/rook
-go 1.22.0
+go 1.22.6
-toolchain go1.22.5
+toolchain go1.22.7
replace (
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1
@@ -21,7 +21,7 @@ require (
github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c
github.com/ceph/go-ceph v0.29.0
github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8
- github.com/csi-addons/kubernetes-csi-addons v0.9.1
+ github.com/csi-addons/kubernetes-csi-addons v0.10.0
github.com/gemalto/kmip-go v0.0.10
github.com/go-ini/ini v1.67.0
github.com/google/go-cmp v0.6.0
diff --git a/go.sum b/go.sum
index f52f6da23a6c..b58bb5fb030f 100644
--- a/go.sum
+++ b/go.sum
@@ -214,8 +214,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/csi-addons/kubernetes-csi-addons v0.9.1 h1:2m8/Pls7Ws3ld1zr/w6lL6BWwsXqqLg2JnW0jP8AX6I=
-github.com/csi-addons/kubernetes-csi-addons v0.9.1/go.mod h1:32kTa/Ngp7hMK2GEjx+Zk8yfKupR5WG4JG+oRzkM1TM=
+github.com/csi-addons/kubernetes-csi-addons v0.10.0 h1:bBc6nb1oROz4RLhqoLFNeGymk2jIRXcx7LvAup9+3Jg=
+github.com/csi-addons/kubernetes-csi-addons v0.10.0/go.mod h1:nqi369YuYMIdysBbHjtYJcWFpcxujPot1HS6tnNWBV4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -465,8 +465,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -724,8 +724,8 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
-github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
-github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -740,8 +740,8 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
-github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
-github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 h1:+S998xHiJApsJZjRAO8wyedU9GfqFd8mtwWly6LqHDo=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod
index 495151118d30..0230510aa324 100644
--- a/pkg/apis/go.mod
+++ b/pkg/apis/go.mod
@@ -1,8 +1,8 @@
module github.com/rook/rook/pkg/apis
-go 1.22.0
+go 1.22.6
-toolchain go1.22.5
+toolchain go1.22.7
replace (
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1
@@ -32,6 +32,8 @@ require (
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/uuid v1.6.0 // indirect
+ github.com/onsi/ginkgo/v2 v2.20.2 // indirect
+ github.com/onsi/gomega v1.34.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/client-go v0.31.1 // indirect
diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum
index 0e8f95c6872e..f0be261e2938 100644
--- a/pkg/apis/go.sum
+++ b/pkg/apis/go.sum
@@ -388,8 +388,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -610,8 +610,8 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
-github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
-github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -625,8 +625,8 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
-github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
-github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 h1:+S998xHiJApsJZjRAO8wyedU9GfqFd8mtwWly6LqHDo=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
@@ -820,8 +820,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index ab029bce5ec4..de30e74a87c0 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -156,7 +156,7 @@ var (
DefaultAttacherImage = "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
DefaultSnapshotterImage = "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1"
DefaultResizerImage = "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
- DefaultCSIAddonsImage = "quay.io/csiaddons/k8s-sidecar:v0.9.1"
+ DefaultCSIAddonsImage = "quay.io/csiaddons/k8s-sidecar:v0.10.0"
// image pull policy
DefaultCSIImagePullPolicy = string(corev1.PullIfNotPresent)
diff --git a/tests/scripts/csiaddons.sh b/tests/scripts/csiaddons.sh
index 60daf61e217e..6b91a847b289 100755
--- a/tests/scripts/csiaddons.sh
+++ b/tests/scripts/csiaddons.sh
@@ -16,7 +16,7 @@
set -xEo pipefail
-CSIADDONS_VERSION="v0.9.1"
+CSIADDONS_VERSION="v0.10.0"
CSIADDONS_CRD_NAME="csiaddonsnodes.csiaddons.openshift.io"
CSIADDONS_CONTAINER_NAME="csi-addons"
From f6474445152822ff1635fe33cb84565b254031e0 Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Thu, 19 Sep 2024 15:03:59 +0530
Subject: [PATCH 24/40] ci: fix ci permission issue with minikube start
this commit upgrades the minikube, k8s, and crictl versions
in CI and also fixes a permission error in the github runner.
Signed-off-by: subhamkrai
---
.github/workflows/canary-test-config/action.yaml | 2 +-
tests/scripts/github-action-helper.sh | 14 +++++++-------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/canary-test-config/action.yaml b/.github/workflows/canary-test-config/action.yaml
index e38c6f8c3484..358e08c02eb1 100644
--- a/.github/workflows/canary-test-config/action.yaml
+++ b/.github/workflows/canary-test-config/action.yaml
@@ -19,7 +19,7 @@ runs:
- name: Setup Minikube
shell: bash --noprofile --norc -eo pipefail -x {0}
run: |
- tests/scripts/github-action-helper.sh install_minikube_with_none_driver v1.30.0
+ tests/scripts/github-action-helper.sh install_minikube_with_none_driver v1.31.0
- name: install deps
shell: bash --noprofile --norc -eo pipefail -x {0}
diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh
index cd10c8bb1a14..277a6f5f5739 100755
--- a/tests/scripts/github-action-helper.sh
+++ b/tests/scripts/github-action-helper.sh
@@ -707,8 +707,8 @@ function test_csi_nfs_workload {
}
function install_minikube_with_none_driver() {
- CRICTL_VERSION="v1.30.0"
- MINIKUBE_VERSION="v1.32.0"
+ CRICTL_VERSION="v1.31.1"
+ MINIKUBE_VERSION="v1.34.0"
sudo apt update
sudo apt install -y conntrack socat
@@ -716,16 +716,16 @@ function install_minikube_with_none_driver() {
sudo dpkg -i minikube_latest_amd64.deb
rm -f minikube_latest_amd64.deb
- curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd_0.3.9.3-0.ubuntu-focal_amd64.deb
- sudo dpkg -i cri-dockerd_0.3.9.3-0.ubuntu-focal_amd64.deb
- rm -f cri-dockerd_0.3.9.3-0.ubuntu-focal_amd64.deb
+ curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.15/cri-dockerd_0.3.15.3-0.ubuntu-focal_amd64.deb
+ sudo dpkg -i cri-dockerd_0.3.15.3-0.ubuntu-focal_amd64.deb
+ rm -f cri-dockerd_0.3.15.3-0.ubuntu-focal_amd64.deb
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/$CRICTL_VERSION/crictl-$CRICTL_VERSION-linux-amd64.tar.gz
sudo tar zxvf crictl-$CRICTL_VERSION-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-$CRICTL_VERSION-linux-amd64.tar.gz
sudo sysctl fs.protected_regular=0
- CNI_PLUGIN_VERSION="v1.4.0"
+ CNI_PLUGIN_VERSION="v1.5.1"
CNI_PLUGIN_TAR="cni-plugins-linux-amd64-$CNI_PLUGIN_VERSION.tgz" # change arch if not on amd64
CNI_PLUGIN_INSTALL_DIR="/opt/cni/bin"
@@ -735,7 +735,7 @@ function install_minikube_with_none_driver() {
rm "$CNI_PLUGIN_TAR"
export MINIKUBE_HOME=$HOME CHANGE_MINIKUBE_NONE_USER=true KUBECONFIG=$HOME/.kube/config
- sudo -E minikube start --kubernetes-version="$1" --driver=none --memory 6g --cpus=2 --addons ingress --cni=calico
+ minikube start --kubernetes-version="$1" --driver=none --memory 6g --cpus=2 --addons ingress --cni=calico
}
FUNCTION="$1"
From 99337b648d61d65d354324ba1d9d584cef52e908 Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Fri, 20 Sep 2024 17:03:37 +0530
Subject: [PATCH 25/40] build: update toolchain to latest version
this commit updates the toolchain version to go v1.22.7
Signed-off-by: subhamkrai
---
go.mod | 2 +-
pkg/apis/go.mod | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/go.mod b/go.mod
index 789a8464d54b..a46e225edce7 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/rook/rook
go 1.22.0
-toolchain go1.22.5
+toolchain go1.22.7
replace (
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1
diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod
index 27790be77987..b1670dd52b88 100644
--- a/pkg/apis/go.mod
+++ b/pkg/apis/go.mod
@@ -2,7 +2,7 @@ module github.com/rook/rook/pkg/apis
go 1.22.0
-toolchain go1.22.5
+toolchain go1.22.7
replace (
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1
From 467578d368763a9c0964b6a669407747881eee7b Mon Sep 17 00:00:00 2001
From: parth-gr
Date: Thu, 12 Sep 2024 13:33:52 +0530
Subject: [PATCH 26/40] docs: update external docs with a better structure
step 1 to improve the external docs
and make them clearer to the end user
Signed-off-by: parth-gr
---
.../CRDs/Cluster/external-cluster/.pages | 4 +
.../external-cluster/advance-external.md | 70 ++++
.../external-cluster/consumer-import.md | 76 ++++
.../external-cluster/external-cluster.md | 324 +-----------------
.../external-cluster/provider-export.md | 125 +++++++
.../external-cluster/upgrade-external.md | 58 ++++
6 files changed, 345 insertions(+), 312 deletions(-)
create mode 100644 Documentation/CRDs/Cluster/external-cluster/advance-external.md
create mode 100644 Documentation/CRDs/Cluster/external-cluster/consumer-import.md
create mode 100644 Documentation/CRDs/Cluster/external-cluster/provider-export.md
create mode 100644 Documentation/CRDs/Cluster/external-cluster/upgrade-external.md
diff --git a/Documentation/CRDs/Cluster/external-cluster/.pages b/Documentation/CRDs/Cluster/external-cluster/.pages
index 5a3a6ca9e41c..a225b612db12 100644
--- a/Documentation/CRDs/Cluster/external-cluster/.pages
+++ b/Documentation/CRDs/Cluster/external-cluster/.pages
@@ -1,3 +1,7 @@
nav:
- external-cluster.md
+ - provider-export.md
+ - consumer-import.md
+ - upgrade-external.md
+ - advance-external.md
- topology-for-external-mode.md
diff --git a/Documentation/CRDs/Cluster/external-cluster/advance-external.md b/Documentation/CRDs/Cluster/external-cluster/advance-external.md
new file mode 100644
index 000000000000..5034b0da9a69
--- /dev/null
+++ b/Documentation/CRDs/Cluster/external-cluster/advance-external.md
@@ -0,0 +1,70 @@
+# External Cluster Options
+
+## NFS storage
+
+Rook suggests a different mechanism for making use of an [NFS service running on the external Ceph standalone cluster](../../../Storage-Configuration/NFS/nfs-csi-driver.md#consuming-nfs-from-an-external-source), if desired.
+
+## Exporting Rook to another cluster
+
+If you have multiple K8s clusters running, and want to use the local `rook-ceph` cluster as the central storage,
+you can export the settings from this cluster with the following steps.
+
+1. Copy create-external-cluster-resources.py into the directory `/etc/ceph/` of the toolbox.
+
+ ```console
+ toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
+ kubectl -n rook-ceph cp deploy/examples/external/create-external-cluster-resources.py $toolbox:/etc/ceph
+ ```
+
+2. Exec into the toolbox pod and run create-external-cluster-resources.py with the needed options to create the required [users and keys](/Documentation/CRDs/Cluster/external-cluster/provider-export.md#1-create-all-users-and-keys).
+
+!!! important
+ For other clusters to connect to storage in this cluster, Rook must be configured with a networking configuration that is accessible from other clusters. Most commonly this is done by enabling host networking in the CephCluster CR so the Ceph daemons will be addressable by their host IPs.
+
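+As a quick (hypothetical) check of what other clusters will be able to reach, the network provider configured on the CephCluster CR can be inspected, assuming the default `rook-ceph` name and namespace:
+
+```console
+# Prints "host" when host networking is enabled
+kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.spec.network.provider}'
+```
+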
+## Admin privileges
+
+If the cluster requires the admin keyring for configuration, update the `rook-ceph-mon` secret with the `client.admin` keyring.
+
+!!! note
+ Sharing the admin key with the external cluster is not generally recommended
+
+1. Get the `client.admin` keyring from the ceph cluster
+
+ ```console
+ ceph auth get client.admin
+ ```
+
+2. Update two values in the `rook-ceph-mon` secret:
+ - `ceph-username`: Set to `client.admin`
+ - `ceph-secret`: Set the client.admin keyring
+
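+As a minimal sketch, both values can be updated in one step with `kubectl patch` (assuming the `rook-ceph` namespace; the keyring value below is a placeholder for the output of `ceph auth get client.admin`):
+
+```console
+kubectl -n rook-ceph patch secret rook-ceph-mon --type merge \
+  -p '{"stringData": {"ceph-username": "client.admin", "ceph-secret": "<client.admin-keyring>"}}'
+```
+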
+After restarting the rook operator (and the toolbox if in use), rook will configure ceph with admin privileges.
+
+## Connect to an External Object Store
+
+Create the [external object store CR](https://github.com/rook/rook/blob/master/deploy/examples/external/object-external.yaml) to configure connection to external gateways.
+
+```console
+cd deploy/examples/external
+kubectl create -f object-external.yaml
+```
+
+Consume the S3 storage in two different ways:
+
+1. Create an [Object store user](https://github.com/rook/rook/blob/master/deploy/examples/object-user.yaml) for credentials to access the S3 endpoint.
+
+ ```console
+ cd deploy/examples
+ kubectl create -f object-user.yaml
+ ```
+
+2. Create a [bucket storage class](https://github.com/rook/rook/blob/master/deploy/examples/external/storageclass-bucket-delete.yaml) where a client can request creating buckets and then create the [Object Bucket Claim](https://github.com/rook/rook/blob/master/deploy/examples/external/object-bucket-claim-delete.yaml), which will create an individual bucket for reading and writing objects.
+
+ ```console
+ cd deploy/examples/external
+ kubectl create -f storageclass-bucket-delete.yaml
+ kubectl create -f object-bucket-claim-delete.yaml
+ ```
+
+!!! hint
+ For more details see the [Object Store topic](../../../Storage-Configuration/Object-Storage-RGW/object-storage.md#connect-to-an-external-object-store)
diff --git a/Documentation/CRDs/Cluster/external-cluster/consumer-import.md b/Documentation/CRDs/Cluster/external-cluster/consumer-import.md
new file mode 100644
index 000000000000..3a8a5f352b47
--- /dev/null
+++ b/Documentation/CRDs/Cluster/external-cluster/consumer-import.md
@@ -0,0 +1,76 @@
+# Import Ceph configuration to the Rook consumer cluster
+
+## Installation types
+
+Install Rook in the consumer cluster, either with [Helm](#helm-installation)
+or the [manifests](#manifest-installation).
+
+### Helm Installation
+
+To install with Helm, use the rook cluster helm chart, which will configure the necessary resources for the external cluster with the example `values-external.yaml`.
+
+```console
+clusterNamespace=rook-ceph
+operatorNamespace=rook-ceph
+cd deploy/examples/charts/rook-ceph-cluster
+helm repo add rook-release https://charts.rook.io/release
+helm install --create-namespace --namespace $clusterNamespace rook-ceph rook-release/rook-ceph -f values.yaml
+helm install --create-namespace --namespace $clusterNamespace rook-ceph-cluster \
+--set operatorNamespace=$operatorNamespace rook-release/rook-ceph-cluster -f values-external.yaml
+```
+
+### Manifest Installation
+
+If not installing with Helm, here are the steps to install with manifests.
+
+1. Deploy Rook by creating the [common.yaml](https://github.com/rook/rook/blob/master/deploy/examples/common.yaml), [crds.yaml](https://github.com/rook/rook/blob/master/deploy/examples/crds.yaml) and [operator.yaml](https://github.com/rook/rook/blob/master/deploy/examples/operator.yaml) manifests.
+
+2. Create [common-external.yaml](https://github.com/rook/rook/blob/master/deploy/examples/external/common-external.yaml) and [cluster-external.yaml](https://github.com/rook/rook/blob/master/deploy/examples/external/cluster-external.yaml).
+
+## Import the Provider Data
+
+1. Paste the output from `create-external-cluster-resources.py` (see [provider export](/Documentation/CRDs/Cluster/external-cluster/provider-export.md)) into your current shell to allow importing the provider data.
+
+2. The import script in the next step uses the current kubeconfig context by
+ default. If you want to specify the kubernetes cluster to use without
+ changing the current context, you can specify the cluster name by setting
+ the KUBECONTEXT environment variable.
+
+ ```console
+    export KUBECONTEXT=<cluster-name>
+ ```
+
+3. Here is the link to the [import](https://github.com/rook/rook/blob/master/deploy/examples/external/import-external-cluster.sh) script. The script uses the `rook-ceph` namespace by default, and several parameters are derived from that namespace variable. If the external cluster uses a different namespace, change the namespace parameter in the script accordingly. For example, for a `new-namespace` namespace, this change is needed to the namespace parameter in the script:
+
+ ```console
+ NAMESPACE=${NAMESPACE:="new-namespace"}
+ ```
+
+4. Run the import script.
+
+ !!! note
+        If the Rook cluster nodes are running a kernel version 5.4 or earlier, remove
+        `fast-diff, object-map, deep-flatten, exclusive-lock` from the `imageFeatures` line.
+
+ ```console
+ . import-external-cluster.sh
+ ```
+
+## Cluster Verification
+
+1. Verify the consumer cluster is connected to the provider ceph cluster:
+
+ ```console
+ $ kubectl -n rook-ceph get CephCluster
+ NAME DATADIRHOSTPATH MONCOUNT AGE STATE HEALTH
+ rook-ceph-external /var/lib/rook 162m Connected HEALTH_OK
+ ```
+
+2. Verify the creation of the storage class depending on the rbd pools and filesystem provided.
+ `ceph-rbd` and `cephfs` would be the respective names for the RBD and CephFS storage classes.
+
+ ```console
+ kubectl -n rook-ceph get sc
+ ```
+
+3. Create a [persistent volume](https://github.com/rook/rook/tree/master/deploy/examples/csi) based on these StorageClasses, as shown in the sketch below.
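+
+    As a hypothetical smoke test, the example PVC manifests under `deploy/examples/csi` can be used (the `storageClassName` in the manifest may need to be set to `ceph-rbd` for an external cluster):
+
+    ```console
+    kubectl create -f deploy/examples/csi/rbd/pvc.yaml
+    kubectl get pvc rbd-pvc
+    ```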
diff --git a/Documentation/CRDs/Cluster/external-cluster/external-cluster.md b/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
index adc7405f2545..28dd279bfbf1 100644
--- a/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
+++ b/Documentation/CRDs/Cluster/external-cluster/external-cluster.md
@@ -8,9 +8,9 @@ In external mode, Rook will provide the configuration for the CSI driver and oth
## External configuration
-* Source cluster: The cluster providing the data, usually configured by [cephadm](https://docs.ceph.com/en/pacific/cephadm/#cephadm)
+* Provider cluster: The cluster providing the data, usually configured by [cephadm](https://docs.ceph.com/en/pacific/cephadm/#cephadm)
-* Consumer cluster: The K8s cluster that will be consuming the external source cluster
+* Consumer cluster: The K8s cluster that will be consuming the external provider cluster
## Prerequisites
@@ -19,324 +19,24 @@ Create the desired types of storage in the provider Ceph cluster:
* [RBD pools](https://docs.ceph.com/en/latest/rados/operations/pools/#create-a-pool)
* [CephFS filesystem](https://docs.ceph.com/en/quincy/cephfs/createfs/)
-## Commands on the source Ceph cluster
+## Connect the external Ceph Provider cluster to the Rook consumer cluster
-In order to configure an external Ceph cluster with Rook, we need to extract some information in order to connect to that cluster.
+1) [Export config from the Provider Ceph cluster](/Documentation/CRDs/Cluster/external-cluster/provider-export.md). The Ceph admin must export configuration, such as a Ceph keyring and mon endpoints, that allows connecting to the Ceph cluster.
-### 1. Create all users and keys
+2) [Import config to the Rook consumer cluster](/Documentation/CRDs/Cluster/external-cluster/consumer-import.md). The configuration exported from the Ceph cluster is imported to Rook to provide the needed connection details.
-Run the python script [create-external-cluster-resources.py](https://github.com/rook/rook/blob/master/deploy/examples/external/create-external-cluster-resources.py) for creating all users and keys.
+## Advanced Options
-```console
-python3 create-external-cluster-resources.py --rbd-data-pool-name --cephfs-filesystem-name --rgw-endpoint --namespace --format bash
-```
+* [NFS storage](/Documentation/CRDs/Cluster/external-cluster/advance-external.md#nfs-storage)
-* `--namespace`: Namespace where CephCluster will run, for example `rook-ceph`
-* `--format bash`: The format of the output
-* `--rbd-data-pool-name`: The name of the RBD data pool
-* `--alias-rbd-data-pool-name`: Provides an alias for the RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore
-* `--rgw-endpoint`: (optional) The RADOS Gateway endpoint in the format `:` or `:`.
-* `--rgw-pool-prefix`: (optional) The prefix of the RGW pools. If not specified, the default prefix is `default`
-* `--rgw-tls-cert-path`: (optional) RADOS Gateway endpoint TLS certificate file path
-* `--rgw-skip-tls`: (optional) Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED)
-* `--rbd-metadata-ec-pool-name`: (optional) Provides the name of erasure coded RBD metadata pool, used for creating ECRBDStorageClass.
-* `--monitoring-endpoint`: (optional) Ceph Manager prometheus exporter endpoints (comma separated list of IP entries of active and standby mgrs)
-* `--monitoring-endpoint-port`: (optional) Ceph Manager prometheus exporter port
-* `--skip-monitoring-endpoint`: (optional) Skip prometheus exporter endpoints, even if they are available. Useful if the prometheus module is not enabled
-* `--ceph-conf`: (optional) Provide a Ceph conf file
-* `--keyring`: (optional) Path to Ceph keyring file, to be used with `--ceph-conf`
-* `--k8s-cluster-name`: (optional) Kubernetes cluster name
-* `--output`: (optional) Output will be stored into the provided file
-* `--dry-run`: (optional) Prints the executed commands without running them
-* `--run-as-user`: (optional) Provides a user name to check the cluster's health status, must be prefixed by `client`.
-* `--cephfs-metadata-pool-name`: (optional) Provides the name of the cephfs metadata pool
-* `--cephfs-filesystem-name`: (optional) The name of the filesystem, used for creating CephFS StorageClass
-* `--cephfs-data-pool-name`: (optional) Provides the name of the CephFS data pool, used for creating CephFS StorageClass
-* `--rados-namespace`: (optional) Divides a pool into separate logical namespaces, used for creating RBD PVC in a CephBlockPoolRadosNamespace (should be lower case)
-* `--subvolume-group`: (optional) Provides the name of the subvolume group, used for creating CephFS PVC in a subvolumeGroup
-* `--rgw-realm-name`: (optional) Provides the name of the rgw-realm
-* `--rgw-zone-name`: (optional) Provides the name of the rgw-zone
-* `--rgw-zonegroup-name`: (optional) Provides the name of the rgw-zone-group
-* `--upgrade`: (optional) Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied.
-* `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--k8s-cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
-* `--v2-port-enable`: (optional) Enables the v2 mon port (3300) for mons.
-* `--topology-pools`: (optional) Comma-separated list of topology-constrained rbd pools
-* `--topology-failure-domain-label`: (optional) K8s cluster failure domain label (example: zone, rack, or host) for the topology-pools that match the ceph domain
-* `--topology-failure-domain-values`: (optional) Comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list
-* `--config-file`: Path to the configuration file, Priority: command-line-args > config.ini values > default values
+* [Exporting Rook to another cluster](/Documentation/CRDs/Cluster/external-cluster/advance-external.md#exporting-rook-to-another-cluster)
-### Config-file
+* [Run consumer Rook cluster with Admin privileges](/Documentation/CRDs/Cluster/external-cluster/advance-external.md#admin-privileges)
-Use the config file to set the user configuration file, add the flag `--config-file` to set the file path.
-
-Example:
-
-`/config.ini`
-
-```console
-[Configurations]
-format = bash
-cephfs-filesystem-name =
-rbd-data-pool-name =
-...
-```
-
-```console
-python3 create-external-cluster-resources.py --config-file /config.ini
-```
-
-!!! note
- You can use both config file and other arguments at the same time
- Priority: command-line-args > config.ini file values > default values
-
-
-### Multi-tenancy
-
-To enable multi-tenancy, run the script with the `--restricted-auth-permission` flag and pass the mandatory flags with it,
-It will generate the secrets which you can use for creating new `Consumer cluster` deployment using the same `Source cluster`(ceph cluster).
-So you would be running different isolated consumer clusters on top of single `Source cluster`.
-
-!!! note
- Restricting the csi-users per pool, and per cluster will require creating new csi-users and new secrets for that csi-users.
- So apply these secrets only to new `Consumer cluster` deployment while using the same `Source cluster`.
-
-```console
-python3 create-external-cluster-resources.py --cephfs-filesystem-name --rbd-data-pool-name --k8s-cluster-name --restricted-auth-permission true --format --rgw-endpoint --namespace
-```
-
-### RGW Multisite
-
-Pass the `--rgw-realm-name`, `--rgw-zonegroup-name` and `--rgw-zone-name` flags to create the admin ops user in a master zone, zonegroup and realm.
-See the [Multisite doc](https://docs.ceph.com/en/quincy/radosgw/multisite/#configuring-a-master-zone) for creating a zone, zonegroup and realm.
-
-```console
-python3 create-external-cluster-resources.py --rbd-data-pool-name --format bash --rgw-endpoint --rgw-realm-name > --rgw-zonegroup-name --rgw-zone-name >
-```
-
-### Topology Based Provisioning
-
-Enable Topology Based Provisioning for RBD pools by passing `--topology-pools`, `--topology-failure-domain-label` and `--topology-failure-domain-values` flags.
-A new storageclass named `ceph-rbd-topology` will be created by the import script with `volumeBindingMode: WaitForFirstConsumer`.
-The storageclass is used to create a volume in the pool matching the topology where a pod is scheduled.
-
-For more details, see the [Topology-Based Provisioning](topology-for-external-mode.md)
-
-### Admin privileges
-
-If in case the cluster needs the admin keyring to configure, update the admin key `rook-ceph-mon` secret with client.admin keyring
-
-!!! note
- Sharing the admin key with the external cluster is not generally recommended
-
-1. Get the `client.admin` keyring from the ceph cluster
-
- ```console
- ceph auth get client.admin
- ```
-
-2. Update two values in the `rook-ceph-mon` secret:
- - `ceph-username`: Set to `client.admin`
- - `ceph-secret`: Set the client.admin keyring
-
-After restarting the rook operator (and the toolbox if in use), rook will configure ceph with admin privileges.
-
-### 2. Copy the bash output
-
-Example Output:
-
-```console
-export ROOK_EXTERNAL_FSID=797f411a-aafe-11ec-a254-fa163e1539f5
-export ROOK_EXTERNAL_USERNAME=client.healthchecker
-export ROOK_EXTERNAL_CEPH_MON_DATA=ceph-rados-upstream-w4pdvq-node1-installer=10.0.210.83:6789
-export ROOK_EXTERNAL_USER_SECRET=AQAdm0FilZDSJxAAMucfuu/j0ZYYP4Bia8Us+w==
-export ROOK_EXTERNAL_DASHBOARD_LINK=https://10.0.210.83:8443/
-export CSI_RBD_NODE_SECRET=AQC1iDxip45JDRAAVahaBhKz1z0WW98+ACLqMQ==
-export CSI_RBD_PROVISIONER_SECRET=AQC1iDxiMM+LLhAA0PucjNZI8sG9Eh+pcvnWhQ==
-export MONITORING_ENDPOINT=10.0.210.83
-export MONITORING_ENDPOINT_PORT=9283
-export RBD_POOL_NAME=replicated_2g
-export RGW_POOL_PREFIX=default
-```
-
-## Commands on the K8s consumer cluster
-
-### Helm Installation
-
-To install with Helm, the rook cluster helm chart will configure the necessary resources for the external cluster with the example `values-external.yaml`.
-
-```console
-clusterNamespace=rook-ceph
-operatorNamespace=rook-ceph
-cd deploy/examples/charts/rook-ceph-cluster
-helm repo add rook-release https://charts.rook.io/release
-helm install --create-namespace --namespace $clusterNamespace rook-ceph rook-release/rook-ceph -f values.yaml
-helm install --create-namespace --namespace $clusterNamespace rook-ceph-cluster \
---set operatorNamespace=$operatorNamespace rook-release/rook-ceph-cluster -f values-external.yaml
-```
-
-Skip the manifest installation section and continue with [Cluster Verification](#cluster-verification).
-
-### Manifest Installation
-
-If not installing with Helm, here are the steps to install with manifests.
-
-1. Deploy Rook, create [common.yaml](https://github.com/rook/rook/blob/master/deploy/examples/common.yaml), [crds.yaml](https://github.com/rook/rook/blob/master/deploy/examples/crds.yaml) and [operator.yaml](https://github.com/rook/rook/blob/master/deploy/examples/operator.yaml) manifests.
-
-2. Create [common-external.yaml](https://github.com/rook/rook/blob/master/deploy/examples/external/common-external.yaml) and [cluster-external.yaml](https://github.com/rook/rook/blob/master/deploy/examples/external/cluster-external.yaml)
-
-### Import the Source Data
-
-1. Paste the above output from `create-external-cluster-resources.py` into your current shell to allow importing the source data.
-
-2. The import script in the next step uses the current kubeconfig context by
- default. If you want to specify the kubernetes cluster to use without
- changing the current context, you can specify the cluster name by setting
- the KUBECONTEXT environment variable.
-
- ```console
- export KUBECONTEXT=
- ```
-
-3. Here is the link for [import](https://github.com/rook/rook/blob/master/deploy/examples/external/import-external-cluster.sh) script. The script has used the `rook-ceph` namespace and few parameters that also have referenced from namespace variable. If user's external cluster has a different namespace, change the namespace parameter in the script according to their external cluster. For example with `new-namespace` namespace, this change is needed on the namespace parameter in the script.
-
- ```console
- NAMESPACE=${NAMESPACE:="new-namespace"}
- ```
-
-4. Run the import script.
-
- !!! note
- If your Rook cluster nodes are running a kernel earlier than or equivalent to 5.4, remove
- `fast-diff, object-map, deep-flatten,exclusive-lock` from the `imageFeatures` line.
-
- ```console
- . import-external-cluster.sh
- ```
-
-### Cluster Verification
-
-1. Verify the consumer cluster is connected to the source ceph cluster:
-
- ```console
- $ kubectl -n rook-ceph get CephCluster
- NAME DATADIRHOSTPATH MONCOUNT AGE STATE HEALTH
- rook-ceph-external /var/lib/rook 162m Connected HEALTH_OK
- ```
-
-2. Verify the creation of the storage class depending on the rbd pools and filesystem provided.
- `ceph-rbd` and `cephfs` would be the respective names for the RBD and CephFS storage classes.
-
- ```console
- kubectl -n rook-ceph get sc
- ```
-
-3. Then you can now create a [persistent volume](https://github.com/rook/rook/tree/master/deploy/examples/csi) based on these StorageClass.
-
-### Connect to an External Object Store
-
-Create the [external object store CR](https://github.com/rook/rook/blob/master/deploy/examples/external/object-external.yaml) to configure connection to external gateways.
-
-```console
-cd deploy/examples/external
-kubectl create -f object-external.yaml
-```
-
-Consume the S3 Storage, in two different ways:
-
-1. Create an [Object store user](https://github.com/rook/rook/blob/master/deploy/examples/object-user.yaml) for credentials to access the S3 endpoint.
-
- ```console
- cd deploy/examples
- kubectl create -f object-user.yaml
- ```
-
-2. Create a [bucket storage class](https://github.com/rook/rook/blob/master/deploy/examples/external/storageclass-bucket-delete.yaml) where a client can request creating buckets and then create the [Object Bucket Claim](https://github.com/rook/rook/blob/master/deploy/examples/external/object-bucket-claim-delete.yaml), which will create an individual bucket for reading and writing objects.
-
- ```console
- cd deploy/examples/external
- kubectl create -f storageclass-bucket-delete.yaml
- kubectl create -f object-bucket-claim-delete.yaml
- ```
-
-!!! hint
- For more details see the [Object Store topic](../../../Storage-Configuration/Object-Storage-RGW/object-storage.md#connect-to-an-external-object-store)
-
-### Connect to v2 mon port
-
-If encryption or compression on the wire is needed, specify the `--v2-port-enable` flag.
-If the v2 address type is present in the `ceph quorum_status`, then the output of 'ceph mon data' i.e, `ROOK_EXTERNAL_CEPH_MON_DATA` will use the v2 port(`3300`).
-
-### NFS storage
-
-Rook suggests a different mechanism for making use of an [NFS service running on the external Ceph standalone cluster](../../../Storage-Configuration/NFS/nfs-csi-driver.md#consuming-nfs-from-an-external-source), if desired.
-
-## Exporting Rook to another cluster
-
-If you have multiple K8s clusters running, and want to use the local `rook-ceph` cluster as the central storage,
-you can export the settings from this cluster with the following steps.
-
-1. Copy create-external-cluster-resources.py into the directory `/etc/ceph/` of the toolbox.
-
- ```console
- toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
- kubectl -n rook-ceph cp deploy/examples/external/create-external-cluster-resources.py $toolbox:/etc/ceph
- ```
-
-2. Exec to the toolbox pod and execute create-external-cluster-resources.py with needed options to create required [users and keys](#1-create-all-users-and-keys).
-
-!!! important
- For other clusters to connect to storage in this cluster, Rook must be configured with a networking configuration that is accessible from other clusters. Most commonly this is done by enabling host networking in the CephCluster CR so the Ceph daemons will be addressable by their host IPs.
+* [Connect to an External Object Store](/Documentation/CRDs/Cluster/external-cluster/advance-external.md#connect-to-an-external-object-store)
## Upgrades
-Upgrading the cluster would be different for restricted caps and non-restricted caps,
-
-1. If consumer cluster doesn't have restricted caps, this will upgrade all the default CSI users (non-restricted)
-
- ```console
- python3 create-external-cluster-resources.py --upgrade
- ```
-
-2. If the consumer cluster has restricted caps
-
- Restricted users created using `--restricted-auth-permission` flag need to pass mandatory flags: '`--rbd-data-pool-name`(if it is a rbd user), `--k8s-cluster-name` and `--run-as-user`' flags while upgrading, in case of cephfs users if you have passed `--cephfs-filesystem-name` flag while creating CSI users then while upgrading it will be mandatory too. In this example the user would be `client.csi-rbd-node-rookstorage-replicapool` (following the pattern `csi-user-clusterName-poolName`)
-
- ```console
- python3 create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
- ```
-
- !!! note
- 1) An existing non-restricted user cannot be converted to a restricted user by upgrading.
- 2) The upgrade flag should only be used to append new permissions to users. It shouldn't be used for changing a CSI user already applied permissions. For example, be careful not to change pools(s) that a user has access to.
-
-### Upgrade cluster to utilize new feature
-
-Some Rook upgrades may require re-running the import steps, or may introduce new external cluster features that can be most easily enabled by re-running the import steps.
-
-To re-run the import steps with new options, the python script should be re-run using the same configuration options that were used for past invocations, plus the configurations that are being added or modified.
-
-Starting with Rook v1.15, the script stores the configuration in the external-cluster-user-command configmap for easy future reference.
-
-* arg: Exact arguments that were used for for processing the script. Argument that are decided using the Priority: command-line-args > config.ini file values > default values.
-
-#### Example `external-cluster-user-command` ConfigMap:
-
-1. Get the last-applied config, if its available
-
- ```console
- $ kubectl get configmap -namespace rook-ceph external-cluster-user-command --output jsonpath='{.data.args}'
- ```
-
-2. Copy the output to config.ini
-
-3. Make any desired modifications and additions to `config.ini``
-
-4. Run the python script again using the [config file](#config-file)
-
-5. [Copy the bash output](#2-copy-the-bash-output)
-
-6. Run the steps under [import-the-source-data](#import-the-source-data)
+* [Upgrade external cluster](/Documentation/CRDs/Cluster/external-cluster/upgrade-external.md)
-!!! warning
- If the last-applied config is unavailable, run the current version of the script again using previously-applied config and CLI flags.
- Failure to reuse the same configuration options when re-invoking the python script can result in unexpected changes when re-running the import script.
+* [Utilize new features in upgrade](/Documentation/CRDs/Cluster/external-cluster/upgrade-external.md#upgrade-cluster-to-utilize-a-new-feature-optional)
diff --git a/Documentation/CRDs/Cluster/external-cluster/provider-export.md b/Documentation/CRDs/Cluster/external-cluster/provider-export.md
new file mode 100644
index 000000000000..7f3b3e8e7abf
--- /dev/null
+++ b/Documentation/CRDs/Cluster/external-cluster/provider-export.md
@@ -0,0 +1,125 @@
+# Export config from the Ceph provider cluster
+
+In order to configure an external Ceph cluster with Rook, some information must be extracted from the provider cluster to allow connecting to it.
+
+## 1. Create all users and keys
+
+Run the python script [create-external-cluster-resources.py](https://github.com/rook/rook/blob/master/deploy/examples/external/create-external-cluster-resources.py) for creating all users and keys.
+
+```console
+python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --cephfs-filesystem-name <filesystem-name> --rgw-endpoint <rgw-endpoint> --namespace <namespace> --format bash
+```
+
+* `--namespace`: Namespace where CephCluster will run, for example `rook-ceph`
+* `--format bash`: The format of the output
+* `--rbd-data-pool-name`: The name of the RBD data pool
+* `--alias-rbd-data-pool-name`: Provides an alias for the RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore
+* `--rgw-endpoint`: (optional) The RADOS Gateway endpoint in the format `<IP>:<PORT>` or `<FQDN>:<PORT>`.
+* `--rgw-pool-prefix`: (optional) The prefix of the RGW pools. If not specified, the default prefix is `default`
+* `--rgw-tls-cert-path`: (optional) RADOS Gateway endpoint TLS certificate file path
+* `--rgw-skip-tls`: (optional) Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED)
+* `--rbd-metadata-ec-pool-name`: (optional) Provides the name of erasure coded RBD metadata pool, used for creating ECRBDStorageClass.
+* `--monitoring-endpoint`: (optional) Ceph Manager prometheus exporter endpoints (comma separated list of IP entries of active and standby mgrs)
+* `--monitoring-endpoint-port`: (optional) Ceph Manager prometheus exporter port
+* `--skip-monitoring-endpoint`: (optional) Skip prometheus exporter endpoints, even if they are available. Useful if the prometheus module is not enabled
+* `--ceph-conf`: (optional) Provide a Ceph conf file
+* `--keyring`: (optional) Path to Ceph keyring file, to be used with `--ceph-conf`
+* `--k8s-cluster-name`: (optional) Kubernetes cluster name
+* `--output`: (optional) Output will be stored into the provided file
+* `--dry-run`: (optional) Prints the executed commands without running them
+* `--run-as-user`: (optional) Provides a user name to check the cluster's health status, must be prefixed by `client`.
+* `--cephfs-metadata-pool-name`: (optional) Provides the name of the cephfs metadata pool
+* `--cephfs-filesystem-name`: (optional) The name of the filesystem, used for creating CephFS StorageClass
+* `--cephfs-data-pool-name`: (optional) Provides the name of the CephFS data pool, used for creating CephFS StorageClass
+* `--rados-namespace`: (optional) Divides a pool into separate logical namespaces, used for creating RBD PVC in a CephBlockPoolRadosNamespace (should be lower case)
+* `--subvolume-group`: (optional) Provides the name of the subvolume group, used for creating CephFS PVC in a subvolumeGroup
+* `--rgw-realm-name`: (optional) Provides the name of the rgw-realm
+* `--rgw-zone-name`: (optional) Provides the name of the rgw-zone
+* `--rgw-zonegroup-name`: (optional) Provides the name of the rgw-zone-group
+* `--upgrade`: (optional) Upgrades the cephCSIKeyrings (for example, client.csi-cephfs-provisioner) and the client.healthchecker Ceph users with the new permissions needed for the new cluster version; older permissions will still be applied.
+* `--restricted-auth-permission`: (optional) Restrict cephCSIKeyrings auth permissions to specific pools, and cluster. Mandatory flags that need to be set are `--rbd-data-pool-name`, and `--k8s-cluster-name`. `--cephfs-filesystem-name` flag can also be passed in case of CephFS user restriction, so it can restrict users to particular CephFS filesystem.
+* `--v2-port-enable`: (optional) Enables the v2 mon port (3300) for mons.
+* `--topology-pools`: (optional) Comma-separated list of topology-constrained rbd pools
+* `--topology-failure-domain-label`: (optional) K8s cluster failure domain label (example: zone, rack, or host) for the topology-pools that match the ceph domain
+* `--topology-failure-domain-values`: (optional) Comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list
+* `--config-file`: Path to the configuration file, Priority: command-line-args > config.ini values > default values
+
+## 2. Copy the bash output
+
+Example Output:
+
+```console
+export ROOK_EXTERNAL_FSID=797f411a-aafe-11ec-a254-fa163e1539f5
+export ROOK_EXTERNAL_USERNAME=client.healthchecker
+export ROOK_EXTERNAL_CEPH_MON_DATA=ceph-rados-upstream-w4pdvq-node1-installer=10.0.210.83:6789
+export ROOK_EXTERNAL_USER_SECRET=AQAdm0FilZDSJxAAMucfuu/j0ZYYP4Bia8Us+w==
+export ROOK_EXTERNAL_DASHBOARD_LINK=https://10.0.210.83:8443/
+export CSI_RBD_NODE_SECRET=AQC1iDxip45JDRAAVahaBhKz1z0WW98+ACLqMQ==
+export CSI_RBD_PROVISIONER_SECRET=AQC1iDxiMM+LLhAA0PucjNZI8sG9Eh+pcvnWhQ==
+export MONITORING_ENDPOINT=10.0.210.83
+export MONITORING_ENDPOINT_PORT=9283
+export RBD_POOL_NAME=replicated_2g
+export RGW_POOL_PREFIX=default
+```
+
+## Examples of using the advanced flags
+
+### Config-file
+
+Use a config file to set the user configuration; add the `--config-file` flag to set the file path.
+
+Example:
+
+`/config.ini`
+
+```console
+[Configurations]
+format = bash
+cephfs-filesystem-name = <filesystem-name>
+rbd-data-pool-name = <pool_name>
+...
+```
+
+```console
+python3 create-external-cluster-resources.py --config-file /config.ini
+```
+
+!!! note
+    The config file and command-line arguments can be used at the same time.
+    Priority: command-line args override config.ini values, which override default values.
+
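+For example (hypothetical values), a flag passed on the command line overrides the corresponding `config.ini` entry:
+
+```console
+# rbd-data-pool-name from config.ini is overridden by the command-line flag
+python3 create-external-cluster-resources.py --config-file /config.ini --rbd-data-pool-name other-pool
+```
+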
+### Multi-tenancy
+
+To enable multi-tenancy, run the script with the `--restricted-auth-permission` flag and pass the mandatory flags with it.
+It will generate secrets that can be used for creating a new `Consumer cluster` deployment with the same `Provider cluster` (Ceph cluster),
+allowing different isolated consumer clusters to run on top of a single `Provider cluster`.
+
+!!! note
+    Restricting the csi-users per pool and per cluster requires creating new csi-users and new secrets for those csi-users.
+    So apply these secrets only to a new `Consumer cluster` deployment while using the same `Provider cluster`.
+
+```console
+python3 create-external-cluster-resources.py --cephfs-filesystem-name <filesystem-name> --rbd-data-pool-name <pool_name> --k8s-cluster-name <k8s-cluster-name> --restricted-auth-permission true --format bash --rgw-endpoint <rgw-endpoint> --namespace <namespace>
+```
+
+### RGW Multisite
+
+Pass the `--rgw-realm-name`, `--rgw-zonegroup-name` and `--rgw-zone-name` flags to create the admin ops user in a master zone, zonegroup and realm.
+See the [Multisite doc](https://docs.ceph.com/en/quincy/radosgw/multisite/#configuring-a-master-zone) for creating a zone, zonegroup and realm.
+
+```console
+python3 create-external-cluster-resources.py --rbd-data-pool-name <pool_name> --format bash --rgw-endpoint <rgw-endpoint> --rgw-realm-name <rgw-realm-name> --rgw-zonegroup-name <rgw-zonegroup-name> --rgw-zone-name <rgw-zone-name>
+```
+
+### Topology Based Provisioning
+
+Enable Topology Based Provisioning for RBD pools by passing `--topology-pools`, `--topology-failure-domain-label` and `--topology-failure-domain-values` flags.
+A new storageclass named `ceph-rbd-topology` will be created by the import script with `volumeBindingMode: WaitForFirstConsumer`.
+The storageclass is used to create a volume in the pool matching the topology where a pod is scheduled.
+
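+For example (a sketch; the pool and zone names below are placeholders, not values from this document):
+
+```console
+python3 create-external-cluster-resources.py --rbd-data-pool-name <pool-name> --format bash --topology-pools <pool-a>,<pool-b> --topology-failure-domain-label zone --topology-failure-domain-values <zone-a>,<zone-b>
+```
+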
+For more details, see the [Topology-Based Provisioning](topology-for-external-mode.md) documentation.
+
+### Connect to v2 mon port
+
+If encryption or compression on the wire is needed, specify the `--v2-port-enable` flag.
+If the v2 address type is present in the `ceph quorum_status` output, then the mon data output, i.e. `ROOK_EXTERNAL_CEPH_MON_DATA`, will use the v2 port (`3300`).
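+
+For example (a sketch; the pool name is a placeholder):
+
+```console
+python3 create-external-cluster-resources.py --rbd-data-pool-name <pool-name> --format bash --v2-port-enable
+```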
diff --git a/Documentation/CRDs/Cluster/external-cluster/upgrade-external.md b/Documentation/CRDs/Cluster/external-cluster/upgrade-external.md
new file mode 100644
index 000000000000..7ee3110ca525
--- /dev/null
+++ b/Documentation/CRDs/Cluster/external-cluster/upgrade-external.md
@@ -0,0 +1,58 @@
+# External Cluster Upgrades
+
+When upgrading an external cluster, Ceph and Rook versions will be updated independently. During the Rook update, the external provider cluster connection also needs to be updated with any settings and permissions for new features.
+
+## Upgrade the cluster to consume the latest ceph user caps (mandatory)
+
+Upgrading the cluster differs for restricted and non-restricted caps:
+
+1. If the consumer cluster doesn't have restricted caps, this will upgrade all the default CSI users (non-restricted):
+
+ ```console
+ python3 create-external-cluster-resources.py --upgrade
+ ```
+
+2. If the consumer cluster has restricted caps
+
+    Restricted users created with the `--restricted-auth-permission` flag need to pass the mandatory flags `--rbd-data-pool-name` (if it is an rbd user), `--k8s-cluster-name` and `--run-as-user` while upgrading. For cephfs users, if the `--cephfs-filesystem-name` flag was passed while creating the CSI users, then it is mandatory while upgrading as well. In this example the user would be `client.csi-rbd-node-rookstorage-replicapool` (following the pattern `csi-user-clusterName-poolName`).
+
+ ```console
+ python3 create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool
+ ```
+
+ !!! note
+ 1) An existing non-restricted user cannot be converted to a restricted user by upgrading.
+        2) The upgrade flag should only be used to append new permissions to users. It shouldn't be used to change permissions already applied to a CSI user. For example, be careful not to change the pool(s) that a user has access to.
+
+## Upgrade cluster to utilize a new feature (optional)
+
+Some Rook upgrades may require re-running the import steps, or may introduce new external cluster features that are most easily enabled by doing so.
+
+To re-run the import steps with new options, the python script should be re-run using the same configuration options that were used for past invocations, plus the configurations that are being added or modified.
+
+Starting with Rook v1.15, the script stores the configuration in the `external-cluster-user-command` configmap for easy future reference.
+
+* `args`: The exact arguments that were used when running the script.
+Arguments are resolved by priority: command-line arguments take precedence over config.ini file values, and config.ini file values take precedence over default values.
+
+### Example `external-cluster-user-command` ConfigMap:
+
+1. Get the last-applied config, if it's available:
+
+ ```console
+    $ kubectl get configmap --namespace rook-ceph external-cluster-user-command --output jsonpath='{.data.args}'
+ ```
+
+2. Copy the output to `config.ini`
+
+3. Make any desired modifications and additions to `config.ini`
+
+4. Run the python script again using the [config file](/Documentation/CRDs/Cluster/external-cluster/provider-export.md#config-file)
+
+5. [Copy the bash output](/Documentation/CRDs/Cluster/external-cluster/provider-export.md#2-copy-the-bash-output)
+
+6. [Import the provider data](/Documentation/CRDs/Cluster/external-cluster/consumer-import.md#import-the-provider-data)
+
+!!! warning
+    If the last-applied config is unavailable, run the current version of the script again using the previously-applied config and CLI flags.
+    Failure to reuse the same configuration options when re-invoking the python script can result in unexpected changes.
From 166e4f35b2364121403b1c5a2d55e02e06482056 Mon Sep 17 00:00:00 2001
From: parth-gr
Date: Fri, 20 Sep 2024 15:27:12 +0530
Subject: [PATCH 27/40] csi: fix the disable driver flag in the csi driver
reconcile
ROOK_CSI_DISABLE_DRIVER was not working accurately
after https://github.com/rook/rook/pull/14489:
the csi driver was installed even when it was
not expected.
Signed-off-by: parth-gr
---
pkg/operator/ceph/csi/controller.go | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/pkg/operator/ceph/csi/controller.go b/pkg/operator/ceph/csi/controller.go
index b93fe7a09bb8..31c9d7924a0f 100644
--- a/pkg/operator/ceph/csi/controller.go
+++ b/pkg/operator/ceph/csi/controller.go
@@ -332,8 +332,7 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
}
}
- if !EnableCSIOperator() {
-
+ if !disableCSI && !EnableCSIOperator() {
err = r.validateAndConfigureDrivers(serverVersion, ownerInfo)
if err != nil {
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to configure ceph csi")
From 33c9bdf652d02b8d9013c16fa0e8dc51c0fe7236 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 Sep 2024 12:24:01 +0000
Subject: [PATCH 28/40] build(deps): bump github/codeql-action from 3.26.7 to
3.26.8
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.7 to 3.26.8.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/8214744c546c1e5c8f03dde8fab3a7353211988d...294a9d92911152fe08befb9ec03e240add280cb3)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
.github/workflows/scorecards.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 2a8dca108edb..3e68e8d803cb 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -64,6 +64,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@8214744c546c1e5c8f03dde8fab3a7353211988d # v3.26.7
+ uses: github/codeql-action/upload-sarif@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8
with:
sarif_file: results.sarif
From 9ecd6a250003eec0f127409ae43b8cd872790ad9 Mon Sep 17 00:00:00 2001
From: Michael Adam
Date: Fri, 20 Sep 2024 13:52:42 +0200
Subject: [PATCH 29/40] build: support golang 1.23
Signed-off-by: Michael Adam
---
.github/workflows/build.yml | 2 +-
build/makelib/golang.mk | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 5d7b9462dd49..844c71b7cb0e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -71,7 +71,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- go-version: ["1.22"]
+ go-version: ["1.22", "1.23"]
steps:
- name: checkout
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
diff --git a/build/makelib/golang.mk b/build/makelib/golang.mk
index e1ae8468e0af..44814e2b577a 100644
--- a/build/makelib/golang.mk
+++ b/build/makelib/golang.mk
@@ -48,7 +48,7 @@ GO_TEST_FLAGS ?=
# ====================================================================================
# Setup go environment
-GO_SUPPORTED_VERSIONS ?= 1.22
+GO_SUPPORTED_VERSIONS ?= 1.22|1.23
GO_PACKAGES := $(foreach t,$(GO_SUBDIRS),$(GO_PROJECT)/$(t)/...)
GO_INTEGRATION_TEST_PACKAGES := $(foreach t,$(GO_INTEGRATION_TESTS_SUBDIRS),$(GO_PROJECT)/$(t)/integration)
From 1c9727c271667d2b4faadb164735996a3c757187 Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Fri, 20 Sep 2024 14:04:51 -0600
Subject: [PATCH 30/40] docs: declare cephconfig settings stable
The cephConfig settings in the CephCluster CR have been
stable and there are no planned changes, so remove the
experimental documentation indicator. Also, clarify
the usage and the precedence of the ceph config
options.
Signed-off-by: Travis Nielsen
---
Documentation/CRDs/Cluster/ceph-cluster-crd.md | 14 +++++++++++---
.../Advanced/ceph-configuration.md | 2 +-
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index 29ded5c34a8b..38edb30d8eb0 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -825,9 +825,6 @@ set the `allowUninstallWithVolumes` to true under `spec.CleanupPolicy`.
## Ceph Config
-!!! attention
- This feature is experimental.
-
The Ceph config options are applied after the MONs are all in quorum and running.
To set Ceph config options, you can add them to your `CephCluster` spec as shown below.
See the [Ceph config reference](https://docs.ceph.com/en/latest/rados/configuration/general-config-ref/)
@@ -848,6 +845,17 @@ spec:
osd_max_scrubs: "10"
```
+The Rook operator will actively apply these values, whereas the
+[ceph.conf settings](../../Storage-Configuration/Advanced/ceph-configuration/#custom-cephconf-settings)
+only take effect after the Ceph daemon pods are restarted.
+
+If both these `cephConfig` and [ceph.conf settings](../../Storage-Configuration/Advanced/ceph-configuration/#custom-cephconf-settings)
+are applied, the `cephConfig` settings will take higher precedence if there is an overlap.
+
+If Ceph settings need to be applied to mons before quorum is initially created, the
+[ceph.conf settings](../../Storage-Configuration/Advanced/ceph-configuration/#custom-cephconf-settings)
+should be used instead.
+
!!! warning
Rook performs no direct validation on these config options, so the validity of the settings is the
user's responsibility.
diff --git a/Documentation/Storage-Configuration/Advanced/ceph-configuration.md b/Documentation/Storage-Configuration/Advanced/ceph-configuration.md
index 399b78a24a0e..3b439e27ecb5 100644
--- a/Documentation/Storage-Configuration/Advanced/ceph-configuration.md
+++ b/Documentation/Storage-Configuration/Advanced/ceph-configuration.md
@@ -208,7 +208,7 @@ ceph osd pool set rbd pg_num 512
## Custom `ceph.conf` Settings
-!!! warning
+!!! info
The advised method for controlling Ceph configuration is to use the [`cephConfig:` structure](../../CRDs/Cluster/ceph-cluster-crd.md#ceph-config)
in the `CephCluster` CRD.
It is highly recommended that this only be used when absolutely necessary and that the `config` be
From b818dd1c7d3fdf90532d23e23dd401c4bb5be172 Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Tue, 24 Sep 2024 16:59:13 +0530
Subject: [PATCH 31/40] build: keep specific z version in go mod 1.22.5
Let's keep the specific z version 1.22.5 in go.mod.
Signed-off-by: subhamkrai
---
go.mod | 6 +++---
go.sum | 8 ++++----
pkg/apis/go.mod | 2 +-
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/go.mod b/go.mod
index add877c686ef..306f52aff40f 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/rook/rook
-go 1.22.6
+go 1.22.5
toolchain go1.22.7
@@ -18,10 +18,10 @@ require (
github.com/IBM/keyprotect-go-client v0.15.1
github.com/aws/aws-sdk-go v1.55.5
github.com/banzaicloud/k8s-objectmatcher v1.8.0
- github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c
+ github.com/ceph/ceph-csi-operator/api v0.0.0-20240918113437-f3030b0ac9f4
github.com/ceph/go-ceph v0.29.0
github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8
- github.com/csi-addons/kubernetes-csi-addons v0.10.0
+ github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1
github.com/gemalto/kmip-go v0.0.10
github.com/go-ini/ini v1.67.0
github.com/google/go-cmp v0.6.0
diff --git a/go.sum b/go.sum
index b58bb5fb030f..35c658de2e3a 100644
--- a/go.sum
+++ b/go.sum
@@ -165,8 +165,8 @@ github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c h1:JOhwt7+iM18pm9s9zAhAKGRJm615AdIaKklbUd7Z8So=
-github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c/go.mod h1:odEUoarG26wXBCC2l4O4nMWhAz6VTKr2FRkv9yELgi8=
+github.com/ceph/ceph-csi-operator/api v0.0.0-20240918113437-f3030b0ac9f4 h1:VYmHuPxEeiZ9jusRzgGybte2ByOUxAD0btQ0Wvac4VA=
+github.com/ceph/ceph-csi-operator/api v0.0.0-20240918113437-f3030b0ac9f4/go.mod h1:odEUoarG26wXBCC2l4O4nMWhAz6VTKr2FRkv9yELgi8=
github.com/ceph/ceph-csi/api v0.0.0-20231227104434-06f9a98b7a83 h1:xWhLO5MR+diAsZoOcPe0zVe+JcJrqMaVbScShye6pXw=
github.com/ceph/ceph-csi/api v0.0.0-20231227104434-06f9a98b7a83/go.mod h1:ZSvtS90FCB/becFi/rjy85sSw1igchaWZfUigxN9FxY=
github.com/ceph/go-ceph v0.29.0 h1:pJQY+++PyY2FMP0ffVaE7FbIdivemBPCu4MWr4S8CtI=
@@ -214,8 +214,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/csi-addons/kubernetes-csi-addons v0.10.0 h1:bBc6nb1oROz4RLhqoLFNeGymk2jIRXcx7LvAup9+3Jg=
-github.com/csi-addons/kubernetes-csi-addons v0.10.0/go.mod h1:nqi369YuYMIdysBbHjtYJcWFpcxujPot1HS6tnNWBV4=
+github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1 h1:9mh79gS8O8uO5okZ2DhFO0LSrhpVXd9R9DLvbnh2He4=
+github.com/csi-addons/kubernetes-csi-addons v0.10.1-0.20240924092040-c11db0b867a1/go.mod h1:LeY7UYm8nEBCG1RcJG0DHmJbva0ILmtp+kcegxRuHhc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod
index 0230510aa324..2626abd58642 100644
--- a/pkg/apis/go.mod
+++ b/pkg/apis/go.mod
@@ -1,6 +1,6 @@
module github.com/rook/rook/pkg/apis
-go 1.22.6
+go 1.22.5
toolchain go1.22.7
From 1a95d6f5338e7284d1ab526fcc35fda1ab3ca137 Mon Sep 17 00:00:00 2001
From: Santosh Pillai
Date: Mon, 23 Sep 2024 10:53:19 +0530
Subject: [PATCH 32/40] core: preserve pool application name change
The default application name is updated inside the `CreatePool` method. Pass
the pool spec by pointer (as an address) so that this change is preserved.
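A minimal, self-contained sketch of the pass-by-value pitfall this fixes (hypothetical type and default value, not the actual Rook code):

```go
package main

import "fmt"

// PoolSpec is a hypothetical stand-in for the pool spec type.
type PoolSpec struct {
	Name        string
	Application string
}

// byValue receives a copy, so setting the default application
// name here is lost at the caller.
func byValue(p PoolSpec) {
	if p.Application == "" {
		p.Application = "rbd"
	}
}

// byPointer mutates the caller's spec, so the default is preserved.
func byPointer(p *PoolSpec) {
	if p.Application == "" {
		p.Application = "rbd"
	}
}

func main() {
	a := PoolSpec{Name: "replicapool"}
	byValue(a)
	fmt.Printf("by value:   %q\n", a.Application) // "" - change lost

	b := PoolSpec{Name: "replicapool"}
	byPointer(&b)
	fmt.Printf("by pointer: %q\n", b.Application) // "rbd" - change preserved
}
```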
Signed-off-by: Santosh Pillai
---
pkg/daemon/ceph/client/pool.go | 8 ++++----
pkg/operator/ceph/file/filesystem.go | 14 ++++++++------
pkg/operator/ceph/object/objectstore.go | 2 +-
pkg/operator/ceph/pool/controller.go | 2 +-
pkg/operator/ceph/pool/controller_test.go | 23 ++++++++++++++++++++---
5 files changed, 34 insertions(+), 15 deletions(-)
diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go
index 82eae5e77eb9..536f5ff908b9 100644
--- a/pkg/daemon/ceph/client/pool.go
+++ b/pkg/daemon/ceph/client/pool.go
@@ -178,11 +178,11 @@ func ParsePoolDetails(in []byte) (CephStoragePoolDetails, error) {
return poolDetails, nil
}
-func CreatePool(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, pool cephv1.NamedPoolSpec) error {
+func CreatePool(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, pool *cephv1.NamedPoolSpec) error {
return CreatePoolWithPGs(context, clusterInfo, clusterSpec, pool, DefaultPGCount)
}
-func CreatePoolWithPGs(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, pool cephv1.NamedPoolSpec, pgCount string) error {
+func CreatePoolWithPGs(context *clusterd.Context, clusterInfo *ClusterInfo, clusterSpec *cephv1.ClusterSpec, pool *cephv1.NamedPoolSpec, pgCount string) error {
if pool.Name == "" {
return errors.New("pool name must be specified")
}
@@ -196,7 +196,7 @@ func CreatePoolWithPGs(context *clusterd.Context, clusterInfo *ClusterInfo, clus
}
if pool.IsReplicated() {
- return createReplicatedPoolForApp(context, clusterInfo, clusterSpec, pool, pgCount)
+ return createReplicatedPoolForApp(context, clusterInfo, clusterSpec, *pool, pgCount)
}
if !pool.IsErasureCoded() {
@@ -215,7 +215,7 @@ func CreatePoolWithPGs(context *clusterd.Context, clusterInfo *ClusterInfo, clus
context,
clusterInfo,
ecProfileName,
- pool,
+ *pool,
pgCount,
true /* enableECOverwrite */)
}
diff --git a/pkg/operator/ceph/file/filesystem.go b/pkg/operator/ceph/file/filesystem.go
index 8a35ab1a0231..b8e6b36cf303 100644
--- a/pkg/operator/ceph/file/filesystem.go
+++ b/pkg/operator/ceph/file/filesystem.go
@@ -196,16 +196,17 @@ func createOrUpdatePools(f *Filesystem, context *clusterd.Context, clusterInfo *
PoolSpec: spec.MetadataPool,
}
metadataPool.Application = cephfsApplication
- err := cephclient.CreatePool(context, clusterInfo, clusterSpec, metadataPool)
+ err := cephclient.CreatePool(context, clusterInfo, clusterSpec, &metadataPool)
if err != nil {
return errors.Wrapf(err, "failed to update metadata pool %q", metadataPool.Name)
}
// generating the data pool's name
dataPoolNames := generateDataPoolNames(f, spec)
- for i, dataPool := range spec.DataPools {
+ for i := range spec.DataPools {
+ dataPool := spec.DataPools[i]
dataPool.Name = dataPoolNames[i]
dataPool.Application = cephfsApplication
- err := cephclient.CreatePool(context, clusterInfo, clusterSpec, dataPool)
+ err := cephclient.CreatePool(context, clusterInfo, clusterSpec, &dataPool)
if err != nil {
return errors.Wrapf(err, "failed to update datapool %q", dataPool.Name)
}
@@ -269,18 +270,19 @@ func (f *Filesystem) doFilesystemCreate(context *clusterd.Context, clusterInfo *
PoolSpec: spec.MetadataPool,
}
if _, poolFound := reversedPoolMap[metadataPool.Name]; !poolFound {
- err = cephclient.CreatePool(context, clusterInfo, clusterSpec, metadataPool)
+ err = cephclient.CreatePool(context, clusterInfo, clusterSpec, &metadataPool)
if err != nil {
return errors.Wrapf(err, "failed to create metadata pool %q", metadataPool.Name)
}
}
dataPoolNames := generateDataPoolNames(f, spec)
- for i, dataPool := range spec.DataPools {
+ for i := range spec.DataPools {
+ dataPool := spec.DataPools[i]
dataPool.Name = dataPoolNames[i]
dataPool.Application = cephfsApplication
if _, poolFound := reversedPoolMap[dataPool.Name]; !poolFound {
- err = cephclient.CreatePool(context, clusterInfo, clusterSpec, dataPool)
+ err = cephclient.CreatePool(context, clusterInfo, clusterSpec, &dataPool)
if err != nil {
return errors.Wrapf(err, "failed to create data pool %q", dataPool.Name)
}
diff --git a/pkg/operator/ceph/object/objectstore.go b/pkg/operator/ceph/object/objectstore.go
index 794419ac8498..b34642938325 100644
--- a/pkg/operator/ceph/object/objectstore.go
+++ b/pkg/operator/ceph/object/objectstore.go
@@ -1116,7 +1116,7 @@ func createRGWPool(ctx *Context, cluster *cephv1.ClusterSpec, poolSpec cephv1.Po
Name: poolName(ctx.Name, requestedName),
PoolSpec: poolSpec,
}
- if err := cephclient.CreatePoolWithPGs(ctx.Context, ctx.clusterInfo, cluster, pool, pgCount); err != nil {
+ if err := cephclient.CreatePoolWithPGs(ctx.Context, ctx.clusterInfo, cluster, &pool, pgCount); err != nil {
return errors.Wrapf(err, "failed to create pool %q", pool.Name)
}
// Set the pg_num_min if not the default so the autoscaler won't immediately increase the pg count
diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go
index 24d080ebf644..14f57c3413c2 100644
--- a/pkg/operator/ceph/pool/controller.go
+++ b/pkg/operator/ceph/pool/controller.go
@@ -381,7 +381,7 @@ func createPool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo,
}
// create the pool
logger.Infof("creating pool %q in namespace %q", p.Name, clusterInfo.Namespace)
- if err := cephclient.CreatePool(context, clusterInfo, clusterSpec, *p); err != nil {
+ if err := cephclient.CreatePool(context, clusterInfo, clusterSpec, p); err != nil {
return errors.Wrapf(err, "failed to configure pool %q", p.Name)
}
diff --git a/pkg/operator/ceph/pool/controller_test.go b/pkg/operator/ceph/pool/controller_test.go
index 29a277370917..74afcc2d78a0 100644
--- a/pkg/operator/ceph/pool/controller_test.go
+++ b/pkg/operator/ceph/pool/controller_test.go
@@ -52,6 +52,18 @@ func TestCreatePool(t *testing.T) {
enabledMgrApp := false
clusterInfo := cephclient.AdminTestClusterInfo("mycluster")
executor := &exectest.MockExecutor{
+ MockExecuteCommandWithTimeout: func(timeout time.Duration, command string, args ...string) (string, error) {
+ logger.Infof("CommandTimeout: %s %v", command, args)
+ if command == "rbd" {
+ if args[0] == "pool" && args[1] == "init" {
+ // assert that `rbd pool init` is only run when application is set to `rbd`
+ assert.Equal(t, "rbd", p.Application)
+ assert.Equal(t, p.Name, args[2])
+ return "{}", nil
+ }
+ }
+ return "", nil
+ },
MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
logger.Infof("Command: %s %v", command, args)
if command == "ceph" {
@@ -69,6 +81,7 @@ func TestCreatePool(t *testing.T) {
assert.Equal(t, ".mgr", args[4])
assert.Equal(t, "mgr", args[5])
} else {
+ fmt.Printf("pool - %v", args)
assert.Fail(t, fmt.Sprintf("invalid pool %q", args[4]))
}
}
@@ -79,14 +92,12 @@ func TestCreatePool(t *testing.T) {
return "{}", nil
} else if args[0] == "mirror" && args[2] == "disable" {
return "", nil
- } else {
- assert.Equal(t, []string{"pool", "init", p.Name}, args[0:3])
}
-
}
return "", nil
},
}
+
context := &clusterd.Context{Executor: executor}
clusterSpec := &cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{Config: map[string]string{cephclient.CrushRootConfigKey: "cluster-crush-root"}}}
@@ -95,6 +106,8 @@ func TestCreatePool(t *testing.T) {
p.Name = "replicapool"
p.Replicated.Size = 1
p.Replicated.RequireSafeReplicaSize = false
+ // reset the application name
+ p.Application = ""
err := createPool(context, clusterInfo, clusterSpec, p)
assert.Nil(t, err)
assert.False(t, enabledMetricsApp)
@@ -102,6 +115,8 @@ func TestCreatePool(t *testing.T) {
t.Run("built-in mgr pool", func(t *testing.T) {
p.Name = ".mgr"
+ // reset the application name
+ p.Application = ""
err := createPool(context, clusterInfo, clusterSpec, p)
assert.Nil(t, err)
assert.True(t, enabledMgrApp)
@@ -112,6 +127,8 @@ func TestCreatePool(t *testing.T) {
p.Replicated.Size = 0
p.ErasureCoded.CodingChunks = 1
p.ErasureCoded.DataChunks = 2
+ // reset the application name
+ p.Application = ""
err := createPool(context, clusterInfo, clusterSpec, p)
assert.Nil(t, err)
})
From b7539890e807960aad9223563455efd4e387fbb1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 30 Sep 2024 12:08:33 +0000
Subject: [PATCH 33/40] build(deps): bump go.uber.org/automaxprocs from 1.5.3
to 1.6.0
Bumps [go.uber.org/automaxprocs](https://github.com/uber-go/automaxprocs) from 1.5.3 to 1.6.0.
- [Release notes](https://github.com/uber-go/automaxprocs/releases)
- [Changelog](https://github.com/uber-go/automaxprocs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/uber-go/automaxprocs/compare/v1.5.3...v1.6.0)
---
updated-dependencies:
- dependency-name: go.uber.org/automaxprocs
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 306f52aff40f..db922fc34d1e 100644
--- a/go.mod
+++ b/go.mod
@@ -39,7 +39,7 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/sykesm/zap-logfmt v0.0.4
- go.uber.org/automaxprocs v1.5.3
+ go.uber.org/automaxprocs v1.6.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.8.0
diff --git a/go.sum b/go.sum
index 35c658de2e3a..a23d2eeb71c2 100644
--- a/go.sum
+++ b/go.sum
@@ -935,8 +935,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
From c96641f29fcc50854745b33cf0817de8835942e4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 30 Sep 2024 12:54:51 +0000
Subject: [PATCH 34/40] build(deps): bump actions/checkout from 4.1.7 to 4.2.0
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.7 to 4.2.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/692973e3d937129bcbf40652eb9f2f61becf3332...d632683dd7b4114ad314bca15554477dd762a938)
---
updated-dependencies:
- dependency-name: actions/checkout
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
.github/workflows/build.yml | 4 +-
.github/workflows/canary-integration-test.yml | 42 +++++++++----------
.github/workflows/codegen.yml | 2 +-
.github/workflows/codespell.yaml | 4 +-
.github/workflows/commitlint.yml | 2 +-
.github/workflows/crds-gen.yml | 2 +-
.github/workflows/daily-nightly-jobs.yml | 20 ++++-----
.github/workflows/docs-check.yml | 2 +-
.github/workflows/golangci-lint.yaml | 2 +-
.github/workflows/helm-lint.yaml | 2 +-
.../integration-test-helm-suite.yaml | 2 +-
.../integration-test-keystone-auth-suite.yaml | 2 +-
.../workflows/integration-test-mgr-suite.yaml | 2 +-
.../integration-test-multi-cluster-suite.yaml | 2 +-
.../integration-test-object-suite.yaml | 2 +-
.../integration-test-smoke-suite.yaml | 2 +-
.../integration-test-upgrade-suite.yaml | 4 +-
.../integration-tests-on-release.yaml | 12 +++---
.github/workflows/linters.yaml | 4 +-
.github/workflows/mod-check.yml | 2 +-
.github/workflows/multus.yaml | 2 +-
.github/workflows/push-build.yaml | 2 +-
.github/workflows/rbac-gen.yaml | 2 +-
.github/workflows/scorecards.yml | 2 +-
.github/workflows/shellcheck.yaml | 2 +-
.github/workflows/snyk.yaml | 2 +-
.github/workflows/unit-test.yml | 2 +-
27 files changed, 65 insertions(+), 65 deletions(-)
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 844c71b7cb0e..4187366d735c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -21,7 +21,7 @@ jobs:
if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -74,7 +74,7 @@ jobs:
go-version: ["1.22", "1.23"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 0c0dfcfbee0d..d3e90d0e1a4a 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -25,7 +25,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -332,7 +332,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -409,7 +409,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
- name: consider debugging
@@ -458,7 +458,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -506,7 +506,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -562,7 +562,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -612,7 +612,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -667,7 +667,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -743,7 +743,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -795,7 +795,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -850,7 +850,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -918,7 +918,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -972,7 +972,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1036,7 +1036,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1122,7 +1122,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1188,7 +1188,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1245,7 +1245,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1504,7 +1504,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1532,7 +1532,7 @@ jobs:
matrix:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1561,7 +1561,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -1649,7 +1649,7 @@ jobs:
ceph-image: ${{ fromJson(inputs.ceph_images) }}
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/codegen.yml b/.github/workflows/codegen.yml
index 987095be6820..79db2fc48ac7 100644
--- a/.github/workflows/codegen.yml
+++ b/.github/workflows/codegen.yml
@@ -30,7 +30,7 @@ jobs:
if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/codespell.yaml b/.github/workflows/codespell.yaml
index 78f387374c30..970e246e3440 100644
--- a/.github/workflows/codespell.yaml
+++ b/.github/workflows/codespell.yaml
@@ -24,7 +24,7 @@ jobs:
name: codespell
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
- name: codespell
@@ -53,7 +53,7 @@ jobs:
name: misspell
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
- name: misspell
diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml
index aa2feb26e75d..72a06e79c98d 100644
--- a/.github/workflows/commitlint.yml
+++ b/.github/workflows/commitlint.yml
@@ -28,7 +28,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@3d28780bbf0365e29b144e272b2121204d5be5f3 # v6.1.2
diff --git a/.github/workflows/crds-gen.yml b/.github/workflows/crds-gen.yml
index e243349210ae..1d98ca06458f 100644
--- a/.github/workflows/crds-gen.yml
+++ b/.github/workflows/crds-gen.yml
@@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/daily-nightly-jobs.yml b/.github/workflows/daily-nightly-jobs.yml
index af1676170776..ba0e412c55b4 100644
--- a/.github/workflows/daily-nightly-jobs.yml
+++ b/.github/workflows/daily-nightly-jobs.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -123,7 +123,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -163,7 +163,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -203,7 +203,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -243,7 +243,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -283,7 +283,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -323,7 +323,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -363,7 +363,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -404,7 +404,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -444,7 +444,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml
index 6a61cd4adc38..731802c6884b 100644
--- a/.github/workflows/docs-check.yml
+++ b/.github/workflows/docs-check.yml
@@ -24,7 +24,7 @@ jobs:
name: docs-check
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml
index a6975e397875..783da5e6539a 100644
--- a/.github/workflows/golangci-lint.yaml
+++ b/.github/workflows/golangci-lint.yaml
@@ -24,7 +24,7 @@ jobs:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml
index 013074f2ec48..c3f1d924bc7c 100644
--- a/.github/workflows/helm-lint.yaml
+++ b/.github/workflows/helm-lint.yaml
@@ -26,7 +26,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-helm-suite.yaml b/.github/workflows/integration-test-helm-suite.yaml
index 8b9d7e42682d..78839a7c2985 100644
--- a/.github/workflows/integration-test-helm-suite.yaml
+++ b/.github/workflows/integration-test-helm-suite.yaml
@@ -31,7 +31,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-keystone-auth-suite.yaml b/.github/workflows/integration-test-keystone-auth-suite.yaml
index dada9abca9cf..2c3239102a46 100644
--- a/.github/workflows/integration-test-keystone-auth-suite.yaml
+++ b/.github/workflows/integration-test-keystone-auth-suite.yaml
@@ -31,7 +31,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-mgr-suite.yaml b/.github/workflows/integration-test-mgr-suite.yaml
index d88e14f67321..15eb3bfb910e 100644
--- a/.github/workflows/integration-test-mgr-suite.yaml
+++ b/.github/workflows/integration-test-mgr-suite.yaml
@@ -30,7 +30,7 @@ jobs:
kubernetes-versions: ["v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-multi-cluster-suite.yaml b/.github/workflows/integration-test-multi-cluster-suite.yaml
index 101fbf2e3841..f250586fad66 100644
--- a/.github/workflows/integration-test-multi-cluster-suite.yaml
+++ b/.github/workflows/integration-test-multi-cluster-suite.yaml
@@ -31,7 +31,7 @@ jobs:
kubernetes-versions: ["v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-object-suite.yaml b/.github/workflows/integration-test-object-suite.yaml
index b50e26f52c39..f2757fec4fc1 100644
--- a/.github/workflows/integration-test-object-suite.yaml
+++ b/.github/workflows/integration-test-object-suite.yaml
@@ -31,7 +31,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-smoke-suite.yaml b/.github/workflows/integration-test-smoke-suite.yaml
index 829f1a14459a..d0fd057c1409 100644
--- a/.github/workflows/integration-test-smoke-suite.yaml
+++ b/.github/workflows/integration-test-smoke-suite.yaml
@@ -31,7 +31,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-test-upgrade-suite.yaml b/.github/workflows/integration-test-upgrade-suite.yaml
index 26c6e2fbf7dc..77ae326954db 100644
--- a/.github/workflows/integration-test-upgrade-suite.yaml
+++ b/.github/workflows/integration-test-upgrade-suite.yaml
@@ -31,7 +31,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -75,7 +75,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/integration-tests-on-release.yaml b/.github/workflows/integration-tests-on-release.yaml
index acc16b6987b9..6af0618795e1 100644
--- a/.github/workflows/integration-tests-on-release.yaml
+++ b/.github/workflows/integration-tests-on-release.yaml
@@ -24,7 +24,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -64,7 +64,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -105,7 +105,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -143,7 +143,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -181,7 +181,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -222,7 +222,7 @@ jobs:
kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml
index d995b051c75e..36aa1208e06e 100644
--- a/.github/workflows/linters.yaml
+++ b/.github/workflows/linters.yaml
@@ -23,7 +23,7 @@ jobs:
yaml-linter:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
@@ -41,7 +41,7 @@ jobs:
pylint:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/mod-check.yml b/.github/workflows/mod-check.yml
index 0e852fc61226..13c63f345ad2 100644
--- a/.github/workflows/mod-check.yml
+++ b/.github/workflows/mod-check.yml
@@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/multus.yaml b/.github/workflows/multus.yaml
index 9511ae3fbfc7..51aa8cac22c9 100644
--- a/.github/workflows/multus.yaml
+++ b/.github/workflows/multus.yaml
@@ -36,7 +36,7 @@ jobs:
NUMBER_OF_COMPUTE_NODES: 5
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/push-build.yaml b/.github/workflows/push-build.yaml
index 6427b86e7c31..1f4e5479cd46 100644
--- a/.github/workflows/push-build.yaml
+++ b/.github/workflows/push-build.yaml
@@ -21,7 +21,7 @@ jobs:
if: github.repository == 'rook/rook'
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/rbac-gen.yaml b/.github/workflows/rbac-gen.yaml
index 7123d2a91ff3..842aa974ca49 100644
--- a/.github/workflows/rbac-gen.yaml
+++ b/.github/workflows/rbac-gen.yaml
@@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 3e68e8d803cb..5dbe49f0b7b9 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -28,7 +28,7 @@ jobs:
steps:
- name: "Checkout code"
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
persist-credentials: false
diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml
index ea9f97952b53..89a3551a53e3 100644
--- a/.github/workflows/shellcheck.yaml
+++ b/.github/workflows/shellcheck.yaml
@@ -24,7 +24,7 @@ jobs:
name: Shellcheck
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master
with:
diff --git a/.github/workflows/snyk.yaml b/.github/workflows/snyk.yaml
index b8c2459ba132..4aca00a56dbe 100644
--- a/.github/workflows/snyk.yaml
+++ b/.github/workflows/snyk.yaml
@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml
index 73c0f259f644..e7cbab9aa2a6 100644
--- a/.github/workflows/unit-test.yml
+++ b/.github/workflows/unit-test.yml
@@ -37,7 +37,7 @@ jobs:
if: "!contains(github.event.pull_request.labels.*.name, 'skip-ci')"
steps:
- name: checkout
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
with:
fetch-depth: 0
From e82925eb00be2e32a331a4f64b2242ec8ed6af07 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 30 Sep 2024 12:55:06 +0000
Subject: [PATCH 35/40] build(deps): bump github/codeql-action from 3.26.8 to
3.26.9
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.8 to 3.26.9.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/294a9d92911152fe08befb9ec03e240add280cb3...461ef6c76dfe95d5c364de2f431ddbd31a417628)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
.github/workflows/scorecards.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 3e68e8d803cb..b9d9c5358864 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -64,6 +64,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@294a9d92911152fe08befb9ec03e240add280cb3 # v3.26.8
+ uses: github/codeql-action/upload-sarif@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9
with:
sarif_file: results.sarif
From 21ca333ed787c6eaf358a4a754ffade22ae8ea7b Mon Sep 17 00:00:00 2001
From: parth-gr
Date: Mon, 30 Sep 2024 17:55:27 +0530
Subject: [PATCH 36/40] csi: add sc privileged to logrotate sidecar container
In deployments on OpenShift where the csi container pods
need to run privileged, the logrotate sidecar container was missing
the privileged permission, preventing the sidecar from running properly.
Signed-off-by: parth-gr
---
.../ceph/csi/template/cephfs/csi-cephfsplugin-holder.yaml | 2 ++
.../template/cephfs/csi-cephfsplugin-provisioner-dep.yaml | 2 ++
pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml | 6 ++++++
.../ceph/csi/template/nfs/csi-nfsplugin-holder.yaml | 2 ++
.../csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml | 2 ++
.../ceph/csi/template/rbd/csi-rbdplugin-holder.yaml | 2 ++
.../csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml | 4 ++++
7 files changed, 20 insertions(+)
diff --git a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-holder.yaml b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-holder.yaml
index 74937c30f62b..911536032c84 100644
--- a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-holder.yaml
+++ b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-holder.yaml
@@ -38,6 +38,8 @@ spec:
# This is necessary for the Bidirectional mount propagation
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
image: {{ .CSIPluginImage }}
command:
- "/bin/sh"
diff --git a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml
index e2a81a4ac7f5..54bf92cd17f8 100644
--- a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml
@@ -254,6 +254,8 @@ spec:
{{ if and .Privileged .CSILogRotation }}
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
{{ end }}
volumeMounts:
- name: socket-dir
diff --git a/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml b/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml
index e739862fa755..4a546e768e7b 100644
--- a/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml
+++ b/pkg/operator/ceph/csi/template/csi-logrotate-sidecar.yaml
@@ -28,6 +28,12 @@ command:
image: {{ .CSIPluginImage }}
imagePullPolicy: IfNotPresent
name: log-collector
+{{ if .Privileged }}
+securityContext:
+ privileged: true
+ capabilities:
+ drop: ["ALL"]
+{{ end }}
volumeMounts:
- mountPath: {{ .CsiLogRootPath }}/logrotate-config/{{ .CsiComponentName }}
name: csi-logs-logrotate
diff --git a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-holder.yaml b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-holder.yaml
index 5ee0e6f0674c..2a4c64cf803b 100644
--- a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-holder.yaml
+++ b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-holder.yaml
@@ -38,6 +38,8 @@ spec:
# This is necessary for the Bidirectional mount propagation
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
image: {{ .CSIPluginImage }}
command:
- "/bin/sh"
diff --git a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml
index 1b29209343fc..7fad6f7bf258 100644
--- a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml
@@ -161,6 +161,8 @@ spec:
{{ if and .Privileged .CSILogRotation }}
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
{{ end }}
volumeMounts:
- name: socket-dir
diff --git a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-holder.yaml b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-holder.yaml
index 9be084d4b791..6191b9729116 100644
--- a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-holder.yaml
+++ b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-holder.yaml
@@ -38,6 +38,8 @@ spec:
# This is necessary for the Bidirectional mount propagation
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
image: {{ .CSIPluginImage }}
command:
- "/bin/sh"
diff --git a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
index ed25616151ed..5f8b23974a3e 100644
--- a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
@@ -207,6 +207,8 @@ spec:
{{ if and .Privileged .CSILogRotation }}
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
{{ end }}
volumeMounts:
- name: socket-dir
@@ -263,6 +265,8 @@ spec:
{{ if and .Privileged .CSILogRotation }}
securityContext:
privileged: true
+ capabilities:
+ drop: ["ALL"]
{{ end }}
volumeMounts:
- name: socket-dir
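Note: all of the hunks above render the same container securityContext. For
reference, a minimal Go sketch of the equivalent object, assuming the
standard k8s.io/api/core/v1 types (not the repo's template code):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        privileged := true
        // Mirrors the added template lines: privileged, with all capabilities dropped.
        sc := &corev1.SecurityContext{
            Privileged:   &privileged,
            Capabilities: &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
        }
        fmt.Println(*sc.Privileged, sc.Capabilities.Drop)
    }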
From 05ac99c0c011d8ecb4f5041e6b868eeba5cbd96b Mon Sep 17 00:00:00 2001
From: Xinliang Liu
Date: Thu, 26 Sep 2024 10:34:35 +0000
Subject: [PATCH 37/40] ci: fix canary-arm64 job
Fix the OSD not coming up.
Since the sdb device may show up as vdb on the runner, let
find_extra_block_dev() exclude nbd devices and pick the proper
extra block device for the OSD.
Clean up the nbd devices after the test job has run.
Fix the logs artifact being uploaded twice, and collect logs
before cleanup.
Signed-off-by: Xinliang Liu
---
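Note: the sketch below is a minimal Go rendering of the device filter this
patch adds to find_extra_block_dev(); the device names are hypothetical,
for illustration only:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        bootDev := "vda" // hypothetical: whatever lsblk reports as the boot disk
        exclude := regexp.MustCompile("(" + bootDev + "|loop|nbd)")
        // Hypothetical `lsblk --noheading --list --nodeps --output KNAME` output:
        for _, dev := range []string{"vda", "nbd0", "nbd1", "loop0", "vdb"} {
            if !exclude.MatchString(dev) {
                fmt.Println(dev) // picks vdb as the extra block device
                break
            }
        }
    }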
.github/workflows/daily-nightly-jobs.yml | 22 +++++++---------------
tests/scripts/github-action-helper.sh | 2 +-
2 files changed, 8 insertions(+), 16 deletions(-)
diff --git a/.github/workflows/daily-nightly-jobs.yml b/.github/workflows/daily-nightly-jobs.yml
index ba0e412c55b4..65fd14ccb35d 100644
--- a/.github/workflows/daily-nightly-jobs.yml
+++ b/.github/workflows/daily-nightly-jobs.yml
@@ -16,8 +16,6 @@ jobs:
canary-arm64:
runs-on: [self-hosted, ubuntu-20.04-arm64, ARM64]
if: github.repository == 'rook/rook'
- env:
- BLOCK: /dev/sdb
steps:
- name: checkout
@@ -91,6 +89,12 @@ jobs:
- name: wait for ceph to be ready
run: tests/scripts/github-action-helper.sh wait_for_ceph_to_be_ready "mon,mgr,osd,mds,rgw,rbd_mirror,fs_mirror" 1
+ - name: collect common logs
+ if: always()
+ uses: ./.github/workflows/collect-logs
+ with:
+ name: canary-arm64
+
- name: teardown minikube, docker and kubectl
if: always()
run: |
@@ -100,24 +104,12 @@ jobs:
sudo service docker stop
sudo rm -rf /usr/local/bin/minikube
sudo rm -rf /usr/local/bin/kubectl
+ sudo modprobe -r nbd
- name: remove /usr/bin/yq
if: always()
run: sudo rm -rf /usr/bin/yq
- - name: collect common logs
- if: always()
- uses: ./.github/workflows/collect-logs
- with:
- name: canary-arm64
-
- - name: upload canary test result
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
- if: always()
- with:
- name: canary-arm64
- path: test
-
smoke-suite-quincy-devel:
if: github.repository == 'rook/rook'
runs-on: ubuntu-22.04
diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh
index 277a6f5f5739..0756a0f77360 100755
--- a/tests/scripts/github-action-helper.sh
+++ b/tests/scripts/github-action-helper.sh
@@ -30,7 +30,7 @@ function find_extra_block_dev() {
boot_dev="$(sudo lsblk --noheading --list --output MOUNTPOINT,PKNAME | grep boot | awk '{print $2}')"
echo " == find_extra_block_dev(): boot_dev='$boot_dev'" >/dev/stderr # debug in case of future errors
# --nodeps ignores partitions
- extra_dev="$(sudo lsblk --noheading --list --nodeps --output KNAME | grep -v loop | grep -v "$boot_dev" | head -1)"
+ extra_dev="$(sudo lsblk --noheading --list --nodeps --output KNAME | grep -Ev "($boot_dev|loop|nbd)" | head -1)"
echo " == find_extra_block_dev(): extra_dev='$extra_dev'" >/dev/stderr # debug in case of future errors
echo "$extra_dev" # output of function
}
From ab8fd90aa642ea670fd8786e4b9bbb04b6beb8e2 Mon Sep 17 00:00:00 2001
From: Michael Adam
Date: Thu, 26 Sep 2024 14:11:53 +0200
Subject: [PATCH 38/40] core: add ROOK_REVISION_HISTORY_LIMIT operator setting
This adds an operator config setting ROOK_REVISION_HISTORY_LIMIT,
defaulting to the Kubernetes default for RevisionHistoryLimit.
If configured, the provided value is used as the RevisionHistoryLimit
for all Deployments that Rook creates.
Fixes: #12722
Signed-off-by: Michael Adam
---
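Note: a minimal, self-contained Go sketch of the intended flow. The
lower-case names are stand-ins for the SetRevisionHistoryLimit and
RevisionHistoryLimit helpers added below, not the repo code itself:

    package main

    import (
        "fmt"
        "strconv"
    )

    var revisionHistoryLimit *int32

    func setRevisionHistoryLimit(data map[string]string) {
        if v := data["ROOK_REVISION_HISTORY_LIMIT"]; v != "" {
            n, err := strconv.ParseInt(v, 10, 32)
            if err != nil {
                revisionHistoryLimit = nil // fall back to the Kubernetes default
                return
            }
            limit := int32(n)
            revisionHistoryLimit = &limit
        }
    }

    func main() {
        setRevisionHistoryLimit(map[string]string{"ROOK_REVISION_HISTORY_LIMIT": "3"})
        // Each DeploymentSpec then takes RevisionHistoryLimit from this value;
        // nil leaves the Kubernetes default (10) in effect.
        fmt.Println(*revisionHistoryLimit) // 3
    }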
deploy/examples/operator-openshift.yaml | 2 ++
deploy/examples/operator.yaml | 2 ++
pkg/operator/ceph/cluster/mgr/spec.go | 1 +
pkg/operator/ceph/cluster/mon/spec.go | 1 +
.../ceph/cluster/nodedaemon/exporter.go | 1 +
pkg/operator/ceph/cluster/osd/spec.go | 1 +
pkg/operator/ceph/cluster/rbd/spec.go | 1 +
pkg/operator/ceph/controller.go | 1 +
.../ceph/controller/controller_utils.go | 26 +++++++++++++++++-
.../ceph/controller/controller_utils_test.go | 27 +++++++++++++++++++
pkg/operator/ceph/file/mds/spec.go | 5 ++--
pkg/operator/ceph/file/mirror/spec.go | 1 +
pkg/operator/ceph/nfs/spec.go | 1 +
pkg/operator/ceph/object/cosi/spec.go | 5 ++--
pkg/operator/ceph/object/spec.go | 1 +
pkg/operator/discover/discover.go | 1 +
16 files changed, 71 insertions(+), 6 deletions(-)
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 82ed8f14f141..105434007652 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -638,6 +638,8 @@ data:
# (Optional) QPS to use while communicating with the kubernetes apiserver.
# CSI_KUBE_API_QPS: "5.0"
+ # RevisionHistoryLimit value for all deployments created by rook.
+ # ROOK_REVISION_HISTORY_LIMIT: "3"
---
# The deployment for the rook operator
# OLM: BEGIN OPERATOR DEPLOYMENT
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index e0faa4d2a6b4..4802f5658f20 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -568,6 +568,8 @@ data:
# (Optional) QPS to use while communicating with the kubernetes apiserver.
# CSI_KUBE_API_QPS: "5.0"
+ # RevisionHistoryLimit value for all deployments created by rook.
+ # ROOK_REVISION_HISTORY_LIMIT: "3"
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
diff --git a/pkg/operator/ceph/cluster/mgr/spec.go b/pkg/operator/ceph/cluster/mgr/spec.go
index 01cfb5f2b617..1c38893d0789 100644
--- a/pkg/operator/ceph/cluster/mgr/spec.go
+++ b/pkg/operator/ceph/cluster/mgr/spec.go
@@ -128,6 +128,7 @@ func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error)
Labels: c.getPodLabels(mgrConfig, true),
},
Spec: apps.DeploymentSpec{
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: c.getPodLabels(mgrConfig, false),
},
diff --git a/pkg/operator/ceph/cluster/mon/spec.go b/pkg/operator/ceph/cluster/mon/spec.go
index 9d79ace7ba4e..8901f26bd919 100644
--- a/pkg/operator/ceph/cluster/mon/spec.go
+++ b/pkg/operator/ceph/cluster/mon/spec.go
@@ -108,6 +108,7 @@ func (c *Cluster) makeDeployment(monConfig *monConfig, canary bool) (*apps.Deplo
}
replicaCount := int32(1)
d.Spec = apps.DeploymentSpec{
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: c.getLabels(monConfig, canary, false),
},
diff --git a/pkg/operator/ceph/cluster/nodedaemon/exporter.go b/pkg/operator/ceph/cluster/nodedaemon/exporter.go
index dec1809eb163..ab4debc27ef1 100644
--- a/pkg/operator/ceph/cluster/nodedaemon/exporter.go
+++ b/pkg/operator/ceph/cluster/nodedaemon/exporter.go
@@ -77,6 +77,7 @@ func (r *ReconcileNode) createOrUpdateCephExporter(node corev1.Node, tolerations
Namespace: cephCluster.GetNamespace(),
},
}
+ deploy.Spec.RevisionHistoryLimit = controller.RevisionHistoryLimit()
err := controllerutil.SetControllerReference(&cephCluster, deploy, r.scheme)
if err != nil {
return controllerutil.OperationResultNone, errors.Errorf("failed to set owner reference of ceph-exporter deployment %q", deploy.Name)
diff --git a/pkg/operator/ceph/cluster/osd/spec.go b/pkg/operator/ceph/cluster/osd/spec.go
index b44aa4ee3225..33477cdbf88b 100644
--- a/pkg/operator/ceph/cluster/osd/spec.go
+++ b/pkg/operator/ceph/cluster/osd/spec.go
@@ -713,6 +713,7 @@ func (c *Cluster) makeDeployment(osdProps osdProperties, osd *OSDInfo, provision
OsdIdLabelKey: fmt.Sprintf("%d", osd.ID),
},
},
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
diff --git a/pkg/operator/ceph/cluster/rbd/spec.go b/pkg/operator/ceph/cluster/rbd/spec.go
index 2b846eae826d..dd627232b2f1 100644
--- a/pkg/operator/ceph/cluster/rbd/spec.go
+++ b/pkg/operator/ceph/cluster/rbd/spec.go
@@ -82,6 +82,7 @@ func (r *ReconcileCephRBDMirror) makeDeployment(daemonConfig *daemonConfig, rbdM
Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, rbdMirror.Name, "cephrbdmirrors.ceph.rook.io", true),
},
Spec: apps.DeploymentSpec{
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: podSpec.Labels,
},
diff --git a/pkg/operator/ceph/controller.go b/pkg/operator/ceph/controller.go
index 238dd29bd43b..c674854a5727 100644
--- a/pkg/operator/ceph/controller.go
+++ b/pkg/operator/ceph/controller.go
@@ -133,6 +133,7 @@ func (r *ReconcileConfig) reconcile(request reconcile.Request) (reconcile.Result
opcontroller.SetAllowLoopDevices(r.config.Parameters)
opcontroller.SetEnforceHostNetwork(r.config.Parameters)
+ opcontroller.SetRevisionHistoryLimit(r.config.Parameters)
logger.Infof("%s done reconciling", controllerName)
return reconcile.Result{}, nil
diff --git a/pkg/operator/ceph/controller/controller_utils.go b/pkg/operator/ceph/controller/controller_utils.go
index f5cc1ca1da27..9d5e207522c4 100644
--- a/pkg/operator/ceph/controller/controller_utils.go
+++ b/pkg/operator/ceph/controller/controller_utils.go
@@ -54,6 +54,9 @@ const (
enforceHostNetworkSettingName string = "ROOK_ENFORCE_HOST_NETWORK"
enforceHostNetworkDefaultValue string = "false"
+ revisionHistoryLimitSettingName string = "ROOK_REVISION_HISTORY_LIMIT"
+ revisionHistoryLimitDefaultValue string = ""
+
// UninitializedCephConfigError refers to the error message printed by the Ceph CLI when there is no ceph configuration file
// This typically is raised when the operator has not finished initializing
UninitializedCephConfigError = "error calling conf_read_file"
@@ -86,7 +89,8 @@ var (
OperatorCephBaseImageVersion string
// loopDevicesAllowed indicates whether loop devices are allowed to be used
- loopDevicesAllowed = false
+ loopDevicesAllowed = false
+ revisionHistoryLimit *int32 = nil
)
func DiscoveryDaemonEnabled(data map[string]string) bool {
@@ -133,6 +137,26 @@ func EnforceHostNetwork() bool {
return cephv1.EnforceHostNetwork()
}
+func SetRevisionHistoryLimit(data map[string]string) {
+ strval := k8sutil.GetValue(data, revisionHistoryLimitSettingName, revisionHistoryLimitDefaultValue)
+ if strval != "" {
+ numval, err := strconv.ParseInt(strval, 10, 32)
+ if err != nil {
+ logger.Warningf("failed to parse value %q for %q. assuming default value.", strval, revisionHistoryLimitSettingName)
+ revisionHistoryLimit = nil
+ return
+
+ }
+ limit := int32(numval)
+ revisionHistoryLimit = &limit
+ }
+
+}
+
+func RevisionHistoryLimit() *int32 {
+ return revisionHistoryLimit
+}
+
// canIgnoreHealthErrStatusInReconcile determines whether a status of HEALTH_ERR in the CephCluster can be ignored safely.
func canIgnoreHealthErrStatusInReconcile(cephCluster cephv1.CephCluster, controllerName string) bool {
// Get a list of all the keys causing the HEALTH_ERR status.
diff --git a/pkg/operator/ceph/controller/controller_utils_test.go b/pkg/operator/ceph/controller/controller_utils_test.go
index 532cc0bcbecc..82774c8c7e06 100644
--- a/pkg/operator/ceph/controller/controller_utils_test.go
+++ b/pkg/operator/ceph/controller/controller_utils_test.go
@@ -133,6 +133,33 @@ func TestSetEnforceHostNetwork(t *testing.T) {
assert.False(t, EnforceHostNetwork())
}
+func TestSetRevisionHistoryLimit(t *testing.T) {
+ opConfig := map[string]string{}
+ t.Run("ROOK_REVISION_HISTORY_LIMIT: test default value", func(t *testing.T) {
+ SetRevisionHistoryLimit(opConfig)
+ assert.Nil(t, RevisionHistoryLimit())
+ })
+
+ var value string = "foo"
+ t.Run("ROOK_REVISION_HISTORY_LIMIT: test invalid value 'foo'", func(t *testing.T) {
+ opConfig[revisionHistoryLimitSettingName] = value
+ SetRevisionHistoryLimit(opConfig)
+ assert.Nil(t, RevisionHistoryLimit())
+ })
+
+ t.Run("ROOK_REVISION_HISTORY_LIMIT: test empty string value", func(t *testing.T) {
+ value = ""
+ opConfig[revisionHistoryLimitSettingName] = value
+ SetRevisionHistoryLimit(opConfig)
+ assert.Nil(t, RevisionHistoryLimit())
+ })
+ t.Run("ROOK_REVISION_HISTORY_LIMIT: test valig value '10'", func(t *testing.T) {
+ value = "10"
+ opConfig[revisionHistoryLimitSettingName] = value
+ SetRevisionHistoryLimit(opConfig)
+ assert.Equal(t, int32(10), *RevisionHistoryLimit())
+ })
+}
func TestIsReadyToReconcile(t *testing.T) {
scheme := scheme.Scheme
scheme.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}, &cephv1.CephClusterList{})
diff --git a/pkg/operator/ceph/file/mds/spec.go b/pkg/operator/ceph/file/mds/spec.go
index 426957a2409c..31065deef622 100644
--- a/pkg/operator/ceph/file/mds/spec.go
+++ b/pkg/operator/ceph/file/mds/spec.go
@@ -94,8 +94,9 @@ func (c *Cluster) makeDeployment(mdsConfig *mdsConfig, fsNamespacedname types.Na
Selector: &metav1.LabelSelector{
MatchLabels: c.podLabels(mdsConfig, false),
},
- Template: podSpec,
- Replicas: &replicas,
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
+ Template: podSpec,
+ Replicas: &replicas,
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
diff --git a/pkg/operator/ceph/file/mirror/spec.go b/pkg/operator/ceph/file/mirror/spec.go
index 8e9153b5bc28..d13d1848bec9 100644
--- a/pkg/operator/ceph/file/mirror/spec.go
+++ b/pkg/operator/ceph/file/mirror/spec.go
@@ -79,6 +79,7 @@ func (r *ReconcileFilesystemMirror) makeDeployment(daemonConfig *daemonConfig, f
Annotations: fsMirror.Spec.Annotations,
Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, fsMirror.Name, "cephfilesystemmirrors.ceph.rook.io", true)},
Spec: apps.DeploymentSpec{
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: podSpec.Labels,
},
diff --git a/pkg/operator/ceph/nfs/spec.go b/pkg/operator/ceph/nfs/spec.go
index 10edf9399f6c..5834acf5c781 100644
--- a/pkg/operator/ceph/nfs/spec.go
+++ b/pkg/operator/ceph/nfs/spec.go
@@ -191,6 +191,7 @@ func (r *ReconcileCephNFS) makeDeployment(nfs *cephv1.CephNFS, cfg daemonConfig)
// Multiple replicas of the nfs service would be handled by creating a service and a new deployment for each one, rather than increasing the pod count here
replicas := int32(1)
deployment.Spec = apps.DeploymentSpec{
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: getLabels(nfs, cfg.ID, false),
},
diff --git a/pkg/operator/ceph/object/cosi/spec.go b/pkg/operator/ceph/object/cosi/spec.go
index 50bf737c5bab..f0cc0a5a990e 100644
--- a/pkg/operator/ceph/object/cosi/spec.go
+++ b/pkg/operator/ceph/object/cosi/spec.go
@@ -43,7 +43,6 @@ func createCephCOSIDriverDeployment(cephCOSIDriver *cephv1.CephCOSIDriver) (*app
replica := int32(1)
minReadySeconds := int32(30)
progressDeadlineSeconds := int32(600)
- revisionHistoryLimit := int32(3)
cephcosidriverDeployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
@@ -52,7 +51,8 @@ func createCephCOSIDriverDeployment(cephCOSIDriver *cephv1.CephCOSIDriver) (*app
Labels: getCOSILabels(cephCOSIDriver.Name, cephCOSIDriver.Namespace),
},
Spec: appsv1.DeploymentSpec{
- Replicas: &replica,
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
+ Replicas: &replica,
Selector: &metav1.LabelSelector{
MatchLabels: getCOSILabels(cephCOSIDriver.Name, cephCOSIDriver.Namespace),
},
@@ -60,7 +60,6 @@ func createCephCOSIDriverDeployment(cephCOSIDriver *cephv1.CephCOSIDriver) (*app
Strategy: strategy,
MinReadySeconds: minReadySeconds,
ProgressDeadlineSeconds: &progressDeadlineSeconds,
- RevisionHistoryLimit: &revisionHistoryLimit,
},
}
diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go
index cd34dab8d9fa..b72910fb9b55 100644
--- a/pkg/operator/ceph/object/spec.go
+++ b/pkg/operator/ceph/object/spec.go
@@ -117,6 +117,7 @@ func (c *clusterConfig) createDeployment(rgwConfig *rgwConfig) (*apps.Deployment
Labels: getLabels(c.store.Name, c.store.Namespace, true),
},
Spec: apps.DeploymentSpec{
+ RevisionHistoryLimit: controller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: getLabels(c.store.Name, c.store.Namespace, false),
},
diff --git a/pkg/operator/discover/discover.go b/pkg/operator/discover/discover.go
index ee4cdb3f6369..b895badd7e84 100644
--- a/pkg/operator/discover/discover.go
+++ b/pkg/operator/discover/discover.go
@@ -102,6 +102,7 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco
Labels: getLabels(),
},
Spec: apps.DaemonSetSpec{
+ RevisionHistoryLimit: opcontroller.RevisionHistoryLimit(),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": discoverDaemonsetName,
From 20f00312805a2b17fb89a465614f931e19a12567 Mon Sep 17 00:00:00 2001
From: Michael Adam
Date: Wed, 2 Oct 2024 16:23:42 +0200
Subject: [PATCH 39/40] helm: add revisionHistoryLimit setting
Co-authored-by: Travis Nielsen
Signed-off-by: Michael Adam
---
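Note: a minimal Go text/template sketch of the conditional this patch adds
to the chart configmap; the quote helper here is a local stand-in for
Helm's sprig function:

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "text/template"
    )

    func main() {
        funcs := template.FuncMap{"quote": func(v interface{}) string {
            return strconv.Quote(fmt.Sprint(v))
        }}
        tmpl := template.Must(template.New("cm").Funcs(funcs).Parse(
            "data:\n" +
                "{{- if .Values.revisionHistoryLimit }}\n" +
                "  ROOK_REVISION_HISTORY_LIMIT: {{ .Values.revisionHistoryLimit | quote }}\n" +
                "{{- end }}\n"))
        // With the value unset the key is omitted entirely, so the operator keeps
        // the Kubernetes default; with "3" it renders ROOK_REVISION_HISTORY_LIMIT: "3".
        _ = tmpl.Execute(os.Stdout, map[string]interface{}{
            "Values": map[string]interface{}{"revisionHistoryLimit": "3"},
        })
    }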
Documentation/Helm-Charts/operator-chart.md | 1 +
deploy/charts/rook-ceph/templates/configmap.yaml | 3 +++
deploy/charts/rook-ceph/values.yaml | 3 +++
3 files changed, 7 insertions(+)
diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index ebb42204fb1f..07acac83603e 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -163,6 +163,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `rbacAggregate.enableOBCs` | If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims | `false` |
| `rbacEnable` | If true, create & use RBAC resources | `true` |
| `resources` | Pod resource requests & limits | `{"limits":{"memory":"512Mi"},"requests":{"cpu":"200m","memory":"128Mi"}}` |
+| `revisionHistoryLimit` | The revision history limit for all pods created by Rook. If blank, the K8s default is 10. | `nil` |
| `scaleDownOperator` | If true, scale down the rook operator. This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling to deploy your helm charts. | `false` |
| `tolerations` | List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment. | `[]` |
| `unreachableNodeTolerationSeconds` | Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override the Kubernetes default of 5 minutes | `5` |
diff --git a/deploy/charts/rook-ceph/templates/configmap.yaml b/deploy/charts/rook-ceph/templates/configmap.yaml
index ea1c5230f107..f0f8987dc0d5 100644
--- a/deploy/charts/rook-ceph/templates/configmap.yaml
+++ b/deploy/charts/rook-ceph/templates/configmap.yaml
@@ -20,6 +20,9 @@ data:
{{- if .Values.discoverDaemonUdev }}
DISCOVER_DAEMON_UDEV_BLACKLIST: {{ .Values.discoverDaemonUdev | quote }}
{{- end }}
+{{- if .Values.revisionHistoryLimit }}
+ ROOK_REVISION_HISTORY_LIMIT: {{ .Values.revisionHistoryLimit | quote }}
+{{- end }}
{{- if .Values.csi }}
ROOK_CSI_ENABLE_RBD: {{ .Values.csi.enableRbdDriver | quote }}
ROOK_CSI_ENABLE_CEPHFS: {{ .Values.csi.enableCephfsDriver | quote }}
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index 4cf7c298d6a4..c630bcd33b23 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -636,6 +636,9 @@ hostpathRequiresPrivileged: false
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
+# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
+revisionHistoryLimit:
+
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
From 810de394e7196a185b4544e98311109e1d1f305b Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Wed, 2 Oct 2024 07:51:05 -0600
Subject: [PATCH 40/40] helm: add enforce host network setting
The ROOK_ENFORCE_HOST_NETWORK option was implemented recently;
this adds a Helm setting to expose it in the Rook chart.
Signed-off-by: Travis Nielsen
---
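Note: a minimal, self-contained sketch of how the operator is assumed to
consume the rendered value; the real parsing lives in SetEnforceHostNetwork,
added in an earlier patch:

    package main

    import (
        "fmt"
        "strconv"
    )

    // enforceHostNetwork is a stand-in for the operator-side parsing, not
    // the repo code.
    func enforceHostNetwork(params map[string]string) bool {
        v, ok := params["ROOK_ENFORCE_HOST_NETWORK"]
        if !ok || v == "" {
            return false // default: pods stay on the pod network
        }
        b, err := strconv.ParseBool(v)
        if err != nil {
            return false
        }
        return b
    }

    func main() {
        fmt.Println(enforceHostNetwork(map[string]string{"ROOK_ENFORCE_HOST_NETWORK": "true"})) // true
        fmt.Println(enforceHostNetwork(nil))                                                    // false
    }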
Documentation/Helm-Charts/operator-chart.md | 1 +
deploy/charts/rook-ceph/templates/configmap.yaml | 4 ++++
deploy/charts/rook-ceph/values.yaml | 3 +++
deploy/examples/operator-openshift.yaml | 4 ++++
deploy/examples/operator.yaml | 4 ++++
tests/framework/installer/ceph_helm_installer.go | 2 ++
6 files changed, 18 insertions(+)
diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index 07acac83603e..a78ddc1ad4e7 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -149,6 +149,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `discoveryDaemonInterval` | Set the discovery daemon device discovery interval (default to 60m) | `"60m"` |
| `enableDiscoveryDaemon` | Enable discovery daemon | `false` |
| `enableOBCWatchOperatorNamespace` | Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used | `true` |
+| `enforceHostNetwork` | Whether to run all Rook pods on the host network, for example in environments where a CNI is not enabled | `false` |
| `hostpathRequiresPrivileged` | Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions. | `false` |
| `image.pullPolicy` | Image pull policy | `"IfNotPresent"` |
| `image.repository` | Image | `"docker.io/rook/ceph"` |
diff --git a/deploy/charts/rook-ceph/templates/configmap.yaml b/deploy/charts/rook-ceph/templates/configmap.yaml
index f0f8987dc0d5..c29dbcd74fc9 100644
--- a/deploy/charts/rook-ceph/templates/configmap.yaml
+++ b/deploy/charts/rook-ceph/templates/configmap.yaml
@@ -23,6 +23,10 @@ data:
{{- if .Values.revisionHistoryLimit }}
ROOK_REVISION_HISTORY_LIMIT: {{ .Values.revisionHistoryLimit | quote }}
{{- end }}
+{{- if .Values.enforceHostNetwork }}
+ ROOK_ENFORCE_HOST_NETWORK: {{ .Values.enforceHostNetwork | quote }}
+{{- end }}
+
{{- if .Values.csi }}
ROOK_CSI_ENABLE_RBD: {{ .Values.csi.enableRbdDriver | quote }}
ROOK_CSI_ENABLE_CEPHFS: {{ .Values.csi.enableCephfsDriver | quote }}
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index c630bcd33b23..7e3d47bb3c04 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -633,6 +633,9 @@ discover:
# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
+# -- Whether to run all Rook pods on the host network, for example in environments where a CNI is not enabled
+enforceHostNetwork: false
+
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 105434007652..876a029f0df3 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -638,6 +638,10 @@ data:
# (Optional) QPS to use while communicating with the kubernetes apiserver.
# CSI_KUBE_API_QPS: "5.0"
+
+ # Whether to run all Rook pods on the host network, for example in environments where a CNI is not enabled
+ ROOK_ENFORCE_HOST_NETWORK: "false"
+
# RevisionHistoryLimit value for all deployments created by rook.
# ROOK_REVISION_HISTORY_LIMIT: "3"
---
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 4802f5658f20..667c718ba8b6 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -568,6 +568,10 @@ data:
# (Optional) QPS to use while communicating with the kubernetes apiserver.
# CSI_KUBE_API_QPS: "5.0"
+
+ # Whether to run all Rook pods on the host network, for example in environments where a CNI is not enabled
+ ROOK_ENFORCE_HOST_NETWORK: "false"
+
# RevisionHistoryLimit value for all deployments created by rook.
# ROOK_REVISION_HISTORY_LIMIT: "3"
---
diff --git a/tests/framework/installer/ceph_helm_installer.go b/tests/framework/installer/ceph_helm_installer.go
index a8042469cae8..13d1c69fb63d 100644
--- a/tests/framework/installer/ceph_helm_installer.go
+++ b/tests/framework/installer/ceph_helm_installer.go
@@ -57,6 +57,8 @@ func (h *CephInstaller) configureRookOperatorViaHelm(upgrade bool) error {
"enableDiscoveryDaemon": h.settings.EnableDiscovery,
"image": map[string]interface{}{"tag": h.settings.RookVersion},
"monitoring": map[string]interface{}{"enabled": true},
+ "revisionHistoryLimit": "3",
+ "enforceHostNetwork": "false",
}
values["csi"] = map[string]interface{}{
"csiRBDProvisionerResource": nil,