diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml
index dc1f937348c8..216d12a25fd7 100644
--- a/.github/workflows/commitlint.yml
+++ b/.github/workflows/commitlint.yml
@@ -16,8 +16,14 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
cancel-in-progress: true
+permissions:
+ contents: read
+
jobs:
lint:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ pull-requests: read # for wagoid/commitlint-github-action to get commits in PR
runs-on: ubuntu-20.04
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml
index e0bff0982c00..a99175d28cef 100644
--- a/.github/workflows/docs-check.yml
+++ b/.github/workflows/docs-check.yml
@@ -16,6 +16,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
cancel-in-progress: true
+permissions:
+ contents: read
+
jobs:
docs-check:
name: docs-check
diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml
index 45943f80a62d..1e9777e38228 100644
--- a/.github/workflows/golangci-lint.yaml
+++ b/.github/workflows/golangci-lint.yaml
@@ -44,3 +44,10 @@ jobs:
# Optional: show only new issues if it's a pull request. The default value is `false`.
# only-new-issues: true
+
+ govulncheck:
+ name: govulncheck
+ runs-on: ubuntu-latest
+ steps:
+ - name: govulncheck
+ uses: golang/govulncheck-action@v1
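A note on the new `govulncheck` job: with no inputs, the action checks out the repository and resolves the Go version from `go.mod`. If the scan ever needs a pinned Go version or an explicit package path, the action accepts inputs for that. A hedged sketch, using the input names from the action's README (`go-version-input`, `go-package`), which should be verified against the pinned release:

```yaml
  govulncheck-pinned:
    name: govulncheck (pinned Go version)
    runs-on: ubuntu-latest
    steps:
      - name: govulncheck
        uses: golang/govulncheck-action@v1
        with:
          go-version-input: "1.20" # assumption: match the go directive in go.mod
          go-package: ./... # assumption: scan all packages
```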
diff --git a/.snyk b/.snyk
index c9c5fee1271f..8d22a93f238d 100644
--- a/.snyk
+++ b/.snyk
@@ -3,13 +3,13 @@ ignore:
"snyk:lic:golang:github.com:hashicorp:vault:sdk:MPL-2.0":
- "*":
reason: Mozilla Public License 2.0 is compatible with Rook's Apache 2.0 license
- "snyk:lic:golang:github.com:hashicorp:vault:MPL-2.0":
+ "snyk:lic:golang:github.com:hashicorp:vault:api:MPL-2.0":
- "*":
reason: Mozilla Public License 2.0 is compatible with Rook's Apache 2.0 license
- "snyk:lic:golang:github.com:hashicorp:vault:api:auth:approle:MPL-2.0":
+ "snyk:lic:golang:github.com:hashicorp:vault:api:auth:kubernetes:MPL-2.0":
- "*":
reason: Mozilla Public License 2.0 is compatible with Rook's Apache 2.0 license
- "snyk:lic:golang:github.com:hashicorp:vault:api:MPL-2.0":
+ "snyk:lic:golang:github.com:hashicorp:vault:api:auth:approle:MPL-2.0":
- "*":
reason: Mozilla Public License 2.0 is compatible with Rook's Apache 2.0 license
"snyk:lic:golang:github.com:hashicorp:hcl:MPL-2.0":
@@ -45,5 +45,3 @@ ignore:
"snyk:lic:golang:github.com:hashicorp:errwrap:MPL-2.0":
- "*":
reason: Mozilla Public License 2.0 is compatible with Rook's Apache 2.0 license
-version: v1.25.0
-patch: {}
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index 5b93adf00f38..db26fa9e02a4 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -101,7 +101,7 @@ Official releases of Ceph Container images are available from [Docker Hub](https
These are general-purpose Ceph containers with all necessary daemons and dependencies installed.
| TAG | MEANING |
-| -------------------- | --------------------------------------------------------- |
+|----------------------|-----------------------------------------------------------|
| vRELNUM | Latest release in this series (e.g., *v17* = Quincy) |
| vRELNUM.Y | Latest stable release in this stable series (e.g., v17.2) |
| vRELNUM.Y.Z | A specific release (e.g., v17.2.6) |
@@ -421,7 +421,7 @@ Below are the settings for host-based cluster. This type of cluster can specify
* `name`: The name of the devices and partitions (e.g., `sda`). The full udev path can also be specified for devices, partitions, and logical volumes (e.g. `/dev/disk/by-id/ata-ST4000DM004-XXXX` - this will not change after reboots).
* `config`: Device-specific config settings. See the [config settings](#osd-configuration-settings) below
-Host-based cluster supports raw device, partition, and logical volume. Be sure to see the
+Host-based clusters support raw devices, partitions, logical volumes, encrypted devices, and multipath devices. Be sure to see the
[quickstart doc prerequisites](../../Getting-Started/quickstart.md#prerequisites) for additional considerations.
Below are the settings for a PVC-based cluster.
@@ -456,13 +456,17 @@ The following are the settings for Storage Class Device Sets which can be config
* `tuneDeviceClass`: For example, Ceph cannot detect AWS volumes as HDDs from the storage class "gp2", so you can improve Ceph performance by setting this to true.
* `tuneFastDeviceClass`: For example, Ceph cannot detect Azure disks as SSDs from the storage class "managed-premium", so you can improve Ceph performance by setting this to true.
* `volumeClaimTemplates`: A list of PVC templates to use for provisioning the underlying storage devices.
+ * `metadata.name`: "data", "metadata", or "wal". If a single template is provided, the name must be "data". If the name is "metadata" or "wal", the devices are used to store the Ceph metadata or WAL respectively. In both cases, the devices must be raw devices or LVM logical volumes.
+
* `resources.requests.storage`: The desired capacity for the underlying storage devices.
- * `storageClassName`: The StorageClass to provision PVCs from. Default would be to use the cluster-default StorageClass. This StorageClass should provide a raw block device, multipath device, or logical volume. Other types are not supported. If you want to use logical volume, please see [known issue of OSD on LV-backed PVC](../../Troubleshooting/ceph-common-issues.md#lvm-metadata-can-be-corrupted-with-osd-on-lv-backed-pvc)
+ * `storageClassName`: The StorageClass to provision PVCs from. The default is the cluster-default StorageClass.
* `volumeMode`: The volume mode to be set for the PVC. Which should be Block
* `accessModes`: The access mode for the PVC to be bound by OSD.
* `schedulerName`: Scheduler name for OSD pod placement. (Optional)
* `encrypted`: whether to encrypt all the OSDs in a given storageClassDeviceSet
+See the table in [OSD Configuration Settings](#osd-configuration-settings) for the allowed configurations.
+
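As a hedged illustration of the template naming rules above, a `storageClassDeviceSet` that splits data and metadata across device classes might look like the sketch below; the storage class names and sizes are placeholders, not recommendations:

```yaml
storage:
  storageClassDeviceSets:
    - name: set1
      count: 3
      volumeClaimTemplates:
        - metadata:
            name: data # required name when multiple templates are provided
          spec:
            resources:
              requests:
                storage: 1Ti
            storageClassName: gp2 # placeholder
            volumeMode: Block
            accessModes:
              - ReadWriteOnce
        - metadata:
            name: metadata # backing device must be a raw device or LVM logical volume
          spec:
            resources:
              requests:
                storage: 16Gi
            storageClassName: io1 # placeholder
            volumeMode: Block
            accessModes:
              - ReadWriteOnce
```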
### OSD Configuration Settings
The following storage selection settings are specific to Ceph and do not apply to other backends. All variables are key-value pairs represented as strings.
@@ -477,6 +481,16 @@ The following storage selection settings are specific to Ceph and do not apply t
* `encryptedDevice`: Encrypt OSD volumes using dmcrypt ("true" or "false"). By default this option is disabled. See [encryption](http://docs.ceph.com/docs/master/ceph-volume/lvm/encryption/) for more information on encryption in Ceph.
* `crushRoot`: The value of the `root` CRUSH map label. The default is `default`. Generally, you should not need to change this. However, if any of your topology labels may have the value `default`, you need to change `crushRoot` to avoid conflicts, since CRUSH map values need to be unique.
+Allowed configurations are:
+
+| block device type | host-based cluster | PVC-based cluster |
+|:------------------|:------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------|
+| disk | | |
+| part | `encryptedDevice` must be "false" | `encrypted` must be `false` |
+| lvm | `metadataDevice` must be "", `osdsPerDevice` must be "1", and `encryptedDevice` must be "false" | `metadata.name` must not be `metadata` or `wal` and `encrypted` must be `false` |
+| crypt | | |
+| mpath | | |
+
### Annotations and Labels
Annotations and Labels can be specified so that the Rook components will have those annotations / labels added to them.
diff --git a/Documentation/Contributing/ci-configuration.md b/Documentation/Contributing/ci-configuration.md
index 5365d77c6e25..bc218c735088 100644
--- a/Documentation/Contributing/ci-configuration.md
+++ b/Documentation/Contributing/ci-configuration.md
@@ -7,7 +7,7 @@ This page contains information regarding the CI configuration used for the Rook
## Secrets
* Snyk (Security Scan):
- * `SNYK_TOKEN` - API Token for the [snyk security scanner](https://snyk.io/) (workflow file: `synk.yaml`).
+ * `SNYK_TOKEN` - API Token for the [snyk security scanner](https://snyk.io/) (workflow file: `snyk.yaml`).
* Testing:
* `IBM_INSTANCE_ID`: Used for KMS (Key Management System) IBM Key Protect access (see [`.github/workflows/encryption-pvc-kms-ibm-kp/action.yml`](https://github.com/rook/rook/blob/master/.github/workflows/encryption-pvc-kms-ibm-kp/action.yml)).
* `IBM_SERVICE_API_KEY`: Used for KMS (Key Management System) IBM Key Protect access (see [`.github/workflows/encryption-pvc-kms-ibm-kp/action.yml`](https://github.com/rook/rook/blob/master/.github/workflows/encryption-pvc-kms-ibm-kp/action.yml)).
diff --git a/Documentation/Getting-Started/Prerequisites/prerequisites.md b/Documentation/Getting-Started/Prerequisites/prerequisites.md
index 6cb7977b455f..d05e31a2eac2 100644
--- a/Documentation/Getting-Started/Prerequisites/prerequisites.md
+++ b/Documentation/Getting-Started/Prerequisites/prerequisites.md
@@ -52,6 +52,7 @@ Ceph OSDs have a dependency on LVM in the following scenarios:
* If encryption is enabled (`encryptedDevice: "true"` in the cluster CR)
* A `metadata` device is specified
+* `osdsPerDevice` is greater than 1
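As a hedged illustration of the scenarios above (field names follow the CephCluster CRD's OSD config settings; values and device names are placeholders), any one of these settings pulls in the LVM dependency:

```yaml
storage:
  devices:
    - name: sdb
      config:
        encryptedDevice: "true" # encryption enabled
    - name: sdc
      config:
        metadataDevice: sdd # a metadata device is specified
    - name: sde
      config:
        osdsPerDevice: "2" # more than one OSD per device
```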
LVM is not required for OSDs in these scenarios:
diff --git a/Documentation/Getting-Started/quickstart.md b/Documentation/Getting-Started/quickstart.md
index 6a058fb53e7e..e598c41e6451 100644
--- a/Documentation/Getting-Started/quickstart.md
+++ b/Documentation/Getting-Started/quickstart.md
@@ -24,9 +24,11 @@ To check if a Kubernetes cluster is ready for `Rook`, see the [prerequisites](Pr
To configure the Ceph storage cluster, at least one of these local storage options is required:
-* Raw devices (no partitions or formatted filesystems)
+* Raw devices (no partitions or formatted filesystem)
* Raw partitions (no formatted filesystem)
* LVM Logical Volumes (no formatted filesystem)
+* Encrypted devices (no formatted filesystem)
+* Multipath devices (no formatted filesystem)
* Persistent Volumes available from a storage class in `block` mode
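To confirm a device qualifies, `lsblk -f` is a quick check; a device with an empty `FSTYPE` column has no filesystem and is eligible (illustrative, device names will vary):

```console
lsblk -f
# devices or partitions with an empty FSTYPE column are usable by Rook
```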
## TL;DR
diff --git a/Documentation/Storage-Configuration/Advanced/ceph-configuration.md b/Documentation/Storage-Configuration/Advanced/ceph-configuration.md
index 5b9ff44a459f..354c8eea2400 100644
--- a/Documentation/Storage-Configuration/Advanced/ceph-configuration.md
+++ b/Documentation/Storage-Configuration/Advanced/ceph-configuration.md
@@ -23,6 +23,14 @@ to also change `ROOK_OPERATOR_NAMESPACE` to create a new Rook Operator for each
forget to set `ROOK_CURRENT_NAMESPACE_ONLY`), or you can leave it at the same value for every
Ceph cluster if you only wish to have one Operator manage all Ceph clusters.
+If the operator namespace is different from the cluster namespace, the operator namespace must be
+created before running the steps below. The cluster namespace does not need to be created first,
+as it will be created by `common.yaml` in the script below.
+
+```console
+kubectl create namespace $ROOK_OPERATOR_NAMESPACE
+```
+
This will help you manage namespaces more easily, but you should still make sure the resources are
configured to your liking.
@@ -47,12 +55,23 @@ kubectl apply -f common.yaml -f operator.yaml -f cluster.yaml # add other files
## Deploying a second cluster
-If you wish to create a new CephCluster in a different namespace than `rook-ceph` while using a single operator to manage both clusters execute the following:
+If you wish to create a new CephCluster in a separate namespace, modify the
+`ROOK_OPERATOR_NAMESPACE` and `SECOND_ROOK_CLUSTER_NAMESPACE` values in the
+instructions below. The default configuration in `common-second-cluster.yaml` already
+uses `rook-ceph` for the operator and `rook-ceph-secondary` for the cluster, so the
+`sed` command can be skipped if you keep these default values.
```console
cd deploy/examples
+export ROOK_OPERATOR_NAMESPACE="rook-ceph"
+export SECOND_ROOK_CLUSTER_NAMESPACE="rook-ceph-secondary"
+
+sed -i.bak \
+ -e "s/\(.*\):.*# namespace:operator/\1: $ROOK_OPERATOR_NAMESPACE # namespace:operator/g" \
+ -e "s/\(.*\):.*# namespace:cluster/\1: $SECOND_ROOK_CLUSTER_NAMESPACE # namespace:cluster/g" \
+ common-second-cluster.yaml
-NAMESPACE=rook-ceph-secondary envsubst < common-second-cluster.yaml | kubectl create -f -
+kubectl create -f common-second-cluster.yaml
```
This will create all the necessary RBACs as well as the new namespace. The script assumes that `common.yaml` was already created.
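To sanity-check the result, confirm the namespace and its RBAC objects exist; a hedged example using the default names above:

```console
kubectl get namespace rook-ceph-secondary
kubectl -n rook-ceph-secondary get rolebindings,serviceaccounts
```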
diff --git a/Documentation/Storage-Configuration/Monitoring/ceph-dashboard.md b/Documentation/Storage-Configuration/Monitoring/ceph-dashboard.md
index a2053a19f53b..684a0bf009ca 100755
--- a/Documentation/Storage-Configuration/Monitoring/ceph-dashboard.md
+++ b/Documentation/Storage-Configuration/Monitoring/ceph-dashboard.md
@@ -133,6 +133,7 @@ spec:
selector:
app: rook-ceph-mgr
rook_cluster: rook-ceph
+ mgr_role: active
sessionAffinity: None
type: NodePort
```
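The added `mgr_role: active` selector ensures the Service targets only the active mgr, avoiding redirects to a standby. As a hedged check, the label can be inspected on the mgr pods:

```console
kubectl -n rook-ceph get pods -l app=rook-ceph-mgr -L mgr_role
```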
diff --git a/Documentation/Troubleshooting/kubectl-plugin.md b/Documentation/Troubleshooting/kubectl-plugin.md
index c4f80338f334..0cb210fad66d 100644
--- a/Documentation/Troubleshooting/kubectl-plugin.md
+++ b/Documentation/Troubleshooting/kubectl-plugin.md
@@ -19,7 +19,7 @@ See the [kubectl-rook-ceph documentation](https://github.com/rook/kubectl-rook-c
- Install Rook plugin
```console
- kubectl kubectl install rook-ceph
+ kubectl krew install rook-ceph
```
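Once installed, the plugin is invoked as `kubectl rook-ceph`; for example, a quick smoke test (subcommand per the kubectl-rook-ceph docs):

```console
kubectl rook-ceph ceph status
```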
## Ceph Commands
diff --git a/Documentation/Upgrade/ceph-upgrade.md b/Documentation/Upgrade/ceph-upgrade.md
index f60f793f59d1..a302940b3081 100644
--- a/Documentation/Upgrade/ceph-upgrade.md
+++ b/Documentation/Upgrade/ceph-upgrade.md
@@ -85,7 +85,18 @@ NEW_CEPH_IMAGE='quay.io/ceph/ceph:v17.2.6-20230410'
kubectl -n $ROOK_CLUSTER_NAMESPACE patch CephCluster $ROOK_CLUSTER_NAMESPACE --type=merge -p "{\"spec\": {\"cephVersion\": {\"image\": \"$NEW_CEPH_IMAGE\"}}}"
```
-#### **2. Wait for the pod updates**
+#### **2. Update the toolbox image**
+
+Since the [Rook toolbox](https://rook.io/docs/rook/latest/Troubleshooting/ceph-toolbox/) is not controlled by
+the Rook operator, users must upgrade it manually by updating the `image` to match the Ceph version
+used by the new Rook operator release. Running an outdated Ceph version in the toolbox may result
+in unexpected behavior.
+
+```console
+kubectl -n rook-ceph set image deploy/rook-ceph-tools rook-ceph-tools=quay.io/ceph/ceph:v17.2.6-20230410
+```
+
+#### **3. Wait for the pod updates**
As with upgrading Rook, now wait for the upgrade to complete. Status can be determined in a similar
way to the Rook upgrade as well.
@@ -105,6 +116,6 @@ This cluster is finished:
ceph-version=v17.2.6-0
```
-#### **3. Verify cluster health**
+#### **4. Verify cluster health**
Verify the Ceph cluster's health using the [health verification](health-verification.md).
diff --git a/README.md b/README.md
index 9124ee8a7bd5..4c2edd78e4da 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
[![Docker Pulls](https://img.shields.io/docker/pulls/rook/ceph)](https://hub.docker.com/u/rook)
[![Go Report Card](https://goreportcard.com/badge/github.com/rook/rook)](https://goreportcard.com/report/github.com/rook/rook)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1599/badge)](https://bestpractices.coreinfrastructure.org/projects/1599)
-[![Security scanning](https://github.com/rook/rook/actions/workflows/synk.yaml/badge.svg)](https://github.com/rook/rook/actions/workflows/synk.yaml)
+[![Security scanning](https://github.com/rook/rook/actions/workflows/snyk.yaml/badge.svg)](https://github.com/rook/rook/actions/workflows/snyk.yaml)
[![Slack](https://img.shields.io/badge/rook-slack-blue)](https://slack.rook.io)
[![Twitter Follow](https://img.shields.io/twitter/follow/rook_io.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=rook_io&user_id=788180534543339520)
diff --git a/build/csv/csv-gen.sh b/build/csv/csv-gen.sh
index a75c67ea2a53..e55d3f448563 100755
--- a/build/csv/csv-gen.sh
+++ b/build/csv/csv-gen.sh
@@ -43,12 +43,14 @@ function generate_csv() {
# These changes are just to make the CSV file as it was earlier and as ocs-operator reads it.
# Skipping this change for darwin since `sed -i` doesn't work properly on darwin,
# and the CSV is never needed in the mac builds.
- if [[ "$OSTYPE" != "darwin"* ]]; then
- sed -i 's/image: rook\/ceph:.*/image: {{.RookOperatorImage}}/g' "$CSV_FILE_NAME"
- sed -i 's/name: rook-ceph.v.*/name: rook-ceph.v{{.RookOperatorCsvVersion}}/g' "$CSV_FILE_NAME"
- sed -i 's/version: 0.0.0/version: {{.RookOperatorCsvVersion}}/g' "$CSV_FILE_NAME"
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ return
fi
+ sed -i 's/image: rook\/ceph:.*/image: {{.RookOperatorImage}}/g' "$CSV_FILE_NAME"
+ sed -i 's/name: rook-ceph.v.*/name: rook-ceph.v{{.RookOperatorCsvVersion}}/g' "$CSV_FILE_NAME"
+ sed -i 's/version: 0.0.0/version: {{.RookOperatorCsvVersion}}/g' "$CSV_FILE_NAME"
+
mv "$CSV_FILE_NAME" "../../build/csv/"
mv "../../build/csv/ceph/$PLATFORM/manifests/"* "../../build/csv/ceph/"
rm -rf "../../build/csv/ceph/$PLATFORM"
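Should the darwin skip ever need to go away, a portable alternative is to pass a backup suffix, which both GNU and BSD `sed` accept; a hedged sketch:

```console
sed -i.bak 's/version: 0.0.0/version: {{.RookOperatorCsvVersion}}/g' "$CSV_FILE_NAME" && rm -f "$CSV_FILE_NAME.bak"
```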
diff --git a/build/release/requirements_docs.txt b/build/release/requirements_docs.txt
index 6833874ebc1c..56068eba11c9 100644
--- a/build/release/requirements_docs.txt
+++ b/build/release/requirements_docs.txt
@@ -3,7 +3,7 @@ mkdocs
mkdocs-awesome-pages-plugin
mkdocs-exclude
mkdocs-macros-plugin
-mkdocs-material==8.*
+mkdocs-material
mkdocs-material-extensions
mkdocs-minify-plugin
mkdocs-redirects
diff --git a/deploy/examples/cluster-on-local-pvc.yaml b/deploy/examples/cluster-on-local-pvc.yaml
index 2963c1720528..4e9eadd99a59 100644
--- a/deploy/examples/cluster-on-local-pvc.yaml
+++ b/deploy/examples/cluster-on-local-pvc.yaml
@@ -160,7 +160,7 @@ apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
- namespace: rook-ceph
+ namespace: rook-ceph # namespace:cluster
spec:
dataDirHostPath: /var/lib/rook
mon:
diff --git a/deploy/examples/common-second-cluster.yaml b/deploy/examples/common-second-cluster.yaml
index 684868e6c625..a7b7ff72d01a 100644
--- a/deploy/examples/common-second-cluster.yaml
+++ b/deploy/examples/common-second-cluster.yaml
@@ -7,13 +7,13 @@
apiVersion: v1
kind: Namespace
metadata:
- name: $NAMESPACE
+ name: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cluster-mgmt
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -21,13 +21,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-system
- namespace: rook-ceph
+ namespace: rook-ceph # namespace:operator
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -35,13 +35,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter-psp
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -49,13 +49,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-cmd-reporter
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: rook-ceph-mgr-system
- namespace: $NAMESPACE
+ name: rook-ceph-mgr-system-secondary
+ namespace: rook-ceph # namespace:operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -63,13 +63,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-mgr-psp
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -77,13 +77,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-mgr
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd-psp
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -91,13 +91,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-purge-osd-psp
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -105,13 +105,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-purge-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-rgw-psp
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -119,13 +119,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-rgw
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-default-psp
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -133,19 +133,19 @@ roleRef:
subjects:
- kind: ServiceAccount
name: default
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-cmd-reporter
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-cmd-reporter
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
rules:
- apiGroups:
- ""
@@ -164,7 +164,7 @@ kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
rules:
# this is needed for rook's "key-management" CLI to fetch the vault token from the secret when
# validating the connection details and for key rotation operations.
@@ -202,13 +202,13 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-osd-external
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -216,26 +216,26 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-mgr
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
# Aspects of ceph osd purge job that require access to the operator/cluster namespace
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-purge-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
rules:
- apiGroups: [""]
resources: ["configmaps"]
@@ -255,7 +255,7 @@ kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-ceph-purge-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -263,16 +263,16 @@ roleRef:
subjects:
- kind: ServiceAccount
name: rook-ceph-purge-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-purge-osd
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-ceph-rgw
- namespace: $NAMESPACE
+ namespace: rook-ceph-secondary # namespace:cluster
diff --git a/deploy/examples/create-external-cluster-resources-tests.py b/deploy/examples/create-external-cluster-resources-tests.py
index df37f0d0ca4b..b3a5196c7493 100644
--- a/deploy/examples/create-external-cluster-resources-tests.py
+++ b/deploy/examples/create-external-cluster-resources-tests.py
@@ -240,11 +240,22 @@ def test_skip_monitoring_endpoint_no_prometheus(self):
del cmd_json_out["mgrmap"]["services"]["prometheus"]
self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
- endpoint, port = self.rjObj.get_active_and_standby_mgrs()
+ endpoint = ""
+ port = ""
+ try:
+ endpoint, port = self.rjObj.get_active_and_standby_mgrs()
+ self.fail("An exception was expected to be raised")
+ except ext.ExecutionFailureException as err:
+ print(f"Expected exception raised: {err}")
+
if endpoint != "" or port != "":
self.fail("Expected monitoring endpoint and port to be empty")
- self.rjObj.main()
+ try:
+ self.rjObj.main()
+ self.fail("An exception was expected to be raised")
+ except ext.ExecutionFailureException as err:
+ print(f"Expected exception raised: {err}")
if self.rjObj.out_map["MONITORING_ENDPOINT"] != "":
self.fail("MONITORING_ENDPOINT should be empty")
diff --git a/deploy/examples/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py
index 148f7d68b7da..80014cb522b9 100644
--- a/deploy/examples/create-external-cluster-resources.py
+++ b/deploy/examples/create-external-cluster-resources.py
@@ -775,7 +775,10 @@ def get_active_and_standby_mgrs(self):
json_out.get("mgrmap", {}).get("services", {}).get("prometheus", "")
)
if not monitoring_endpoint:
- return "", ""
+ raise ExecutionFailureException(
+ "can't find monitoring_endpoint, prometheus module might not be enabled, "
+ "enable the module by running 'ceph mgr module enable prometheus'"
+ )
# now check the stand-by mgr-s
standby_arr = json_out.get("mgrmap", {}).get("standbys", [])
for each_standby in standby_arr:
@@ -1299,6 +1302,14 @@ def validate_rbd_pool(self):
f"The provided pool, '{self._arg_parser.rbd_data_pool_name}', does not exist"
)
+ def init_rbd_pool(self):
+ if isinstance(self.cluster, DummyRados):
+ return
+ rbd_pool_name = self._arg_parser.rbd_data_pool_name
+ ioctx = self.cluster.open_ioctx(rbd_pool_name)
+ rbd_inst = rbd.RBD()
+ rbd_inst.pool_init(ioctx, True)
+
def validate_rados_namespace(self):
rbd_pool_name = self._arg_parser.rbd_data_pool_name
rados_namespace = self._arg_parser.rados_namespace
@@ -1467,6 +1478,7 @@ def _gen_output_map(self):
self._arg_parser.k8s_cluster_name.lower()
) # always convert cluster name to lowercase characters
self.validate_rbd_pool()
+ self.init_rbd_pool()
self.validate_rados_namespace()
self._excluded_keys.add("K8S_CLUSTER_NAME")
self.get_cephfs_data_pool_details()
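For reference, `init_rbd_pool` does through the rados binding what the Ceph CLI's `rbd pool init` does: it initializes the pool for RBD use so image creation works immediately. The CLI equivalent (pool name is a placeholder):

```console
rbd pool init <rbd-data-pool-name>
```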
diff --git a/deploy/examples/dashboard-external-http.yaml b/deploy/examples/dashboard-external-http.yaml
index 82338367f554..8d9f72d419c7 100644
--- a/deploy/examples/dashboard-external-http.yaml
+++ b/deploy/examples/dashboard-external-http.yaml
@@ -15,6 +15,6 @@ spec:
selector:
app: rook-ceph-mgr
mgr_role: active
- rook_cluster: rook-ceph
+ rook_cluster: rook-ceph # namespace:cluster
sessionAffinity: None
type: NodePort
diff --git a/deploy/examples/dashboard-external-https.yaml b/deploy/examples/dashboard-external-https.yaml
index 6a18dd30ceaa..86d46b847942 100644
--- a/deploy/examples/dashboard-external-https.yaml
+++ b/deploy/examples/dashboard-external-https.yaml
@@ -15,6 +15,6 @@ spec:
selector:
app: rook-ceph-mgr
mgr_role: active
- rook_cluster: rook-ceph
+ rook_cluster: rook-ceph # namespace:cluster
sessionAffinity: None
type: NodePort
diff --git a/deploy/examples/dashboard-loadbalancer.yaml b/deploy/examples/dashboard-loadbalancer.yaml
index 19fe8eb5c302..1785621bd5b3 100644
--- a/deploy/examples/dashboard-loadbalancer.yaml
+++ b/deploy/examples/dashboard-loadbalancer.yaml
@@ -15,6 +15,6 @@ spec:
selector:
app: rook-ceph-mgr
mgr_role: active
- rook_cluster: rook-ceph
+ rook_cluster: rook-ceph # namespace:cluster
sessionAffinity: None
type: LoadBalancer
diff --git a/deploy/examples/monitoring/exporter-service-monitor.yaml b/deploy/examples/monitoring/exporter-service-monitor.yaml
index 10a0b7934f1c..f195f490e13e 100644
--- a/deploy/examples/monitoring/exporter-service-monitor.yaml
+++ b/deploy/examples/monitoring/exporter-service-monitor.yaml
@@ -12,7 +12,7 @@ spec:
selector:
matchLabels:
app: rook-ceph-exporter
- rook_cluster: rook-ceph
+ rook_cluster: rook-ceph # namespace:cluster
ceph_daemon_id: exporter
endpoints:
- port: ceph-exporter-http-metrics
diff --git a/deploy/examples/monitoring/service-monitor.yaml b/deploy/examples/monitoring/service-monitor.yaml
index 9bf062687828..7ef491f428ff 100644
--- a/deploy/examples/monitoring/service-monitor.yaml
+++ b/deploy/examples/monitoring/service-monitor.yaml
@@ -12,7 +12,7 @@ spec:
selector:
matchLabels:
app: rook-ceph-mgr
- rook_cluster: rook-ceph
+ rook_cluster: rook-ceph # namespace:cluster
endpoints:
- port: http-metrics
path: /metrics
diff --git a/deploy/examples/nfs-load-balancer.yaml b/deploy/examples/nfs-load-balancer.yaml
index 4b7f82afeefd..5fcf73d96030 100644
--- a/deploy/examples/nfs-load-balancer.yaml
+++ b/deploy/examples/nfs-load-balancer.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: rook-ceph-nfs-my-nfs-load-balancer
- namespace: rook-ceph
+ namespace: rook-ceph # namespace:cluster
spec:
ports:
- name: nfs
diff --git a/deploy/examples/osd-env-override.yaml b/deploy/examples/osd-env-override.yaml
index 454ccc50a5ed..2eb80a8d0460 100644
--- a/deploy/examples/osd-env-override.yaml
+++ b/deploy/examples/osd-env-override.yaml
@@ -8,7 +8,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: rook-ceph-osd-env-override
- namespace: rook-ceph
+ namespace: rook-ceph # namespace:cluster
data:
# Bypass the ASan's assertion that it is the very first loaded DSO.
# This is necessary for crimson-osd as it's currently built with
diff --git a/deploy/examples/pool-mirrored.yaml b/deploy/examples/pool-mirrored.yaml
index 7fe22d1980e5..4b7a16a2a893 100644
--- a/deploy/examples/pool-mirrored.yaml
+++ b/deploy/examples/pool-mirrored.yaml
@@ -7,7 +7,7 @@ apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: mirrored-pool
- namespace: rook-ceph
+ namespace: rook-ceph # namespace:cluster
spec:
replicated:
size: 3
diff --git a/deploy/examples/sqlitevfs-client.yaml b/deploy/examples/sqlitevfs-client.yaml
index f6fc9674ff48..a821bd2923f1 100644
--- a/deploy/examples/sqlitevfs-client.yaml
+++ b/deploy/examples/sqlitevfs-client.yaml
@@ -6,7 +6,7 @@ apiVersion: ceph.rook.io/v1
kind: CephClient
metadata:
name: sqlitevfs
- namespace: rook-ceph
+ namespace: rook-ceph # namespace:cluster
spec:
caps:
mon: 'allow r'
diff --git a/deploy/examples/volume-replication-class.yaml b/deploy/examples/volume-replication-class.yaml
index 5700285cf2ea..312c0a2eac4d 100644
--- a/deploy/examples/volume-replication-class.yaml
+++ b/deploy/examples/volume-replication-class.yaml
@@ -9,4 +9,4 @@ spec:
schedulingInterval: "12m"
schedulingStartTime: "16:18:43"
replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner
- replication.storage.openshift.io/replication-secret-namespace: rook-ceph
+ replication.storage.openshift.io/replication-secret-namespace: rook-ceph # namespace:operator
diff --git a/go.mod b/go.mod
index 960aedbe434c..44f949962656 100644
--- a/go.mod
+++ b/go.mod
@@ -4,14 +4,14 @@ go 1.20
require (
github.com/IBM/keyprotect-go-client v0.12.2
- github.com/aws/aws-sdk-go v1.45.24
+ github.com/aws/aws-sdk-go v1.46.1
github.com/banzaicloud/k8s-objectmatcher v1.8.0
- github.com/ceph/go-ceph v0.23.0
+ github.com/ceph/go-ceph v0.24.0
github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8
github.com/csi-addons/kubernetes-csi-addons v0.7.0
github.com/gemalto/kmip-go v0.0.10
github.com/go-ini/ini v1.67.0
- github.com/google/go-cmp v0.5.9
+ github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.3.1
github.com/hashicorp/vault/api v1.10.0
github.com/jetstack/cert-manager v1.7.3
@@ -30,14 +30,14 @@ require (
golang.org/x/sync v0.4.0
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.28.2
- k8s.io/apiextensions-apiserver v0.28.2
- k8s.io/apimachinery v0.28.2
- k8s.io/cli-runtime v0.28.2
- k8s.io/client-go v0.28.2
- k8s.io/cloud-provider v0.28.2
+ k8s.io/api v0.28.3
+ k8s.io/apiextensions-apiserver v0.28.3
+ k8s.io/apimachinery v0.28.3
+ k8s.io/cli-runtime v0.28.3
+ k8s.io/client-go v0.28.3
+ k8s.io/cloud-provider v0.28.3
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
- sigs.k8s.io/controller-runtime v0.16.2
+ sigs.k8s.io/controller-runtime v0.16.3
sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/yaml v1.3.0
)
@@ -129,7 +129,7 @@ require (
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/component-base v0.28.2 // indirect
+ k8s.io/component-base v0.28.3 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
diff --git a/go.sum b/go.sum
index 06246e3ad7b5..2e3ed19f09d6 100644
--- a/go.sum
+++ b/go.sum
@@ -450,8 +450,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/aws/aws-sdk-go v1.45.24 h1:TZx/CizkmCQn8Rtsb11iLYutEQVGK5PK9wAhwouELBo=
-github.com/aws/aws-sdk-go v1.45.24/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.46.1 h1:U26quvBWFZMQuultLw5tloW4GnmWaChEwMZNq8uYatw=
+github.com/aws/aws-sdk-go v1.46.1/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/banzaicloud/k8s-objectmatcher v1.8.0 h1:Nugn25elKtPMTA2br+JgHNeSQ04sc05MDPmpJnd1N2A=
github.com/banzaicloud/k8s-objectmatcher v1.8.0/go.mod h1:p2LSNAjlECf07fbhDyebTkPUIYnU05G+WfGgkTmgeMg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -468,8 +468,8 @@ github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/ceph/go-ceph v0.23.0 h1:b/glx9y7vXQJd5m6VqfNv9krYHny7W/+Zc3fNyTbIIo=
-github.com/ceph/go-ceph v0.23.0/go.mod h1:QDYBF+MBdj9B5nSAvevZOrh5WSOflN15t/tZ67b17Qw=
+github.com/ceph/go-ceph v0.24.0 h1:ab1pQCTiNrwjJJJ3bebwQM9tjDQ4tXGKfXAZBNdFiYI=
+github.com/ceph/go-ceph v0.24.0/go.mod h1:gdL5+ewDeHcbV4ZsfD3EH3na35trT07YaTVD1hhJWEg=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -728,8 +728,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -1875,37 +1876,37 @@ k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
-k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
-k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg=
+k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM=
+k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.18.4/go.mod h1:NYeyeYq4SIpFlPxSAB6jHPIdvu3hL0pc36wuRChybio=
-k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU=
-k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg=
+k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08=
+k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
-k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
+k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A=
+k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8=
-k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk=
-k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA=
+k8s.io/cli-runtime v0.28.3 h1:lvuJYVkwCqHEvpS6KuTZsUVwPePFjBfSGvuaLl2SxzA=
+k8s.io/cli-runtime v0.28.3/go.mod h1:jeX37ZPjIcENVuXDDTskG3+FnVuZms5D9omDXS/2Jjc=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY=
-k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY=
-k8s.io/cloud-provider v0.28.2 h1:9qsYm86hm4bnPgZbl9LE29Zfgjuq3NZR2dgtPioJ40s=
-k8s.io/cloud-provider v0.28.2/go.mod h1:40fqf6MtgYho5Eu4gkyLgh5abxU/QKTMTIwBxt4ILyU=
+k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4=
+k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo=
+k8s.io/cloud-provider v0.28.3 h1:9u+JjA3zIn0nqLOOa8tWnprFkffguSAhfBvo8p7LhBQ=
+k8s.io/cloud-provider v0.28.3/go.mod h1:shAJxdrKu+SwwGUhkodxByPjaH8KBFZqXo6jU1F0ehI=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.4/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/component-base v0.18.4/go.mod h1:7jr/Ef5PGmKwQhyAz/pjByxJbC58mhKAhiaDu0vXfPk=
-k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E=
-k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc=
+k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI=
+k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1943,8 +1944,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A=
-sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU=
-sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU=
+sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4=
+sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
diff --git a/mkdocs.yml b/mkdocs.yml
index fe2cbdf7149d..6b7b20db610f 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -1,6 +1,6 @@
site_name: Rook Ceph Documentation
docs_dir: Documentation/
-site_url: 'https://rook.io'
+site_url: "https://rook.io"
repo_url: https://github.com/rook/rook
edit_uri: edit/master/Documentation/
site_author: Rook Authors
@@ -26,15 +26,15 @@ theme:
favicon: https://rook.io/images/favicon_192x192.png
logo: https://rook.io/images/rook-logo.svg
palette:
- - scheme: 'default'
- primary: 'rook-blue'
- accent: 'deep orange'
+ - scheme: "default"
+ primary: "rook-blue"
+ accent: "deep orange"
toggle:
icon: material/toggle-switch-off-outline
name: Switch to dark mode
- - scheme: 'slate'
- primary: 'rook-blue'
- accent: 'red'
+ - scheme: "slate"
+ primary: "rook-blue"
+ accent: "red"
toggle:
icon: material/toggle-switch
name: Switch to light mode
@@ -75,10 +75,11 @@ plugins:
README.md: Getting-Started/intro.md
- mike:
# these fields are all optional; the defaults are as below...
- version_selector: true # set to false to leave out the version selector
- css_dir: css # the directory to put the version selector's CSS
- javascript_dir: js # the directory to put the version selector's JS
canonical_version: null # the version for <link rel="canonical">; `null`
+ version_selector: true # set to false to leave out the version selector
+ css_dir: css # the directory to put the version selector's CSS
+ javascript_dir: js # the directory to put the version selector's JS
+ canonical_version:
+ null # the version for <link rel="canonical">; `null`
# uses the version specified via `mike deploy`
markdown_extensions:
- admonition
@@ -91,8 +92,8 @@ markdown_extensions:
- tables
- pymdownx.details
- pymdownx.emoji:
- emoji_index: !!python/name:materialx.emoji.twemoji
- emoji_generator: !!python/name:materialx.emoji.to_svg
+ emoji_index: !!python/name:material.extensions.emoji.twemoji
+ emoji_generator: !!python/name:material.extensions.emoji.to_svg
- pymdownx.highlight:
anchor_linenums: true
use_pygments: true
@@ -116,6 +117,6 @@ extra:
- icon: fontawesome/brands/twitter
link: https://twitter.com/rook_io
- icon: fontawesome/solid/envelopes-bulk
- link: 'https://groups.google.com/forum/#!forum/rook-dev'
+ link: "https://groups.google.com/forum/#!forum/rook-dev"
- icon: fontawesome/brands/medium
link: https://blog.rook.io/
diff --git a/pkg/operator/ceph/cluster/nodedaemon/keyring.go b/pkg/operator/ceph/cluster/nodedaemon/keyring.go
index 0472ba3e799d..af59694bf086 100644
--- a/pkg/operator/ceph/cluster/nodedaemon/keyring.go
+++ b/pkg/operator/ceph/cluster/nodedaemon/keyring.go
@@ -34,7 +34,7 @@ const (
[client.crash]
key = %s
caps mon = "allow profile crash"
- caps mgr = "allow profile crash"
+ caps mgr = "allow rw"
`
)
@@ -59,7 +59,7 @@ func CreateCrashCollectorSecret(context *clusterd.Context, clusterInfo *client.C
func cephCrashCollectorKeyringCaps() []string {
return []string{
"mon", "allow profile crash",
- "mgr", "allow profile crash",
+ "mgr", "allow rw",
}
}
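For clusters deployed before this change, the crash collector's existing key keeps the old caps until the operator reconciles it; if a manual update were ever needed, the equivalent Ceph CLI call (a hedged example, run from the toolbox) would be:

```console
ceph auth caps client.crash mon 'allow profile crash' mgr 'allow rw'
```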
diff --git a/pkg/operator/ceph/cluster/nodedaemon/keyring_test.go b/pkg/operator/ceph/cluster/nodedaemon/keyring_test.go
index 376c23b60ba1..4dc9f244c92b 100644
--- a/pkg/operator/ceph/cluster/nodedaemon/keyring_test.go
+++ b/pkg/operator/ceph/cluster/nodedaemon/keyring_test.go
@@ -24,5 +24,5 @@ import (
func TestCephCrashCollectorKeyringCaps(t *testing.T) {
caps := cephCrashCollectorKeyringCaps()
- assert.Equal(t, caps, []string{"mon", "allow profile crash", "mgr", "allow profile crash"})
+ assert.Equal(t, caps, []string{"mon", "allow profile crash", "mgr", "allow rw"})
}
diff --git a/pkg/operator/ceph/cluster/watcher.go b/pkg/operator/ceph/cluster/watcher.go
index 599cfb840493..da55e20ce641 100644
--- a/pkg/operator/ceph/cluster/watcher.go
+++ b/pkg/operator/ceph/cluster/watcher.go
@@ -219,7 +219,10 @@ func (c *clientCluster) fenceNode(ctx context.Context, node *corev1.Node, cluste
return nil
}
- clusterInfo := cephclient.AdminClusterInfo(ctx, cluster.Namespace, cluster.Name)
+ clusterInfo, _, _, err := opcontroller.LoadClusterInfo(c.context, ctx, cluster.Namespace, &cluster.Spec)
+ if err != nil {
+ return pkgerror.Wrapf(err, "failed to load cluster info")
+ }
for i := range rbdPVList {
err = c.fenceRbdImage(ctx, node, cluster, clusterInfo, rbdPVList[i])
diff --git a/pkg/operator/ceph/cluster/watcher_test.go b/pkg/operator/ceph/cluster/watcher_test.go
index 2a28d755f1ea..19d659437ac1 100644
--- a/pkg/operator/ceph/cluster/watcher_test.go
+++ b/pkg/operator/ceph/cluster/watcher_test.go
@@ -271,7 +271,24 @@ func TestHandleNodeFailure(t *testing.T) {
},
}
- _, err := c.context.Clientset.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
+ // Mock clusterInfo
+ secrets := map[string][]byte{
+ "fsid": []byte("c47cac40-9bee-4d52-823b-ccd803ba5bfe"),
+ "mon-secret": []byte("monsecret"),
+ "admin-secret": []byte("adminsecret"),
+ }
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "rook-ceph-mon",
+ Namespace: ns,
+ },
+ Data: secrets,
+ Type: k8sutil.RookType,
+ }
+ _, err := c.context.Clientset.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{})
+ assert.NoError(t, err)
+
+ _, err = c.context.Clientset.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
assert.NoError(t, err)
_, err = c.context.ApiExtensionsClient.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, &v1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: "networkfences.csiaddons.openshift.io"}}, metav1.CreateOptions{})
diff --git a/pkg/operator/ceph/object/controller_test.go b/pkg/operator/ceph/object/controller_test.go
index efdd303538cb..76f3fcfe930d 100644
--- a/pkg/operator/ceph/object/controller_test.go
+++ b/pkg/operator/ceph/object/controller_test.go
@@ -348,7 +348,7 @@ var mockMultisiteAdminOpsCtxFunc = func(objContext *Context, spec *cephv1.Object
return &AdminOpsContext{
Context: *context,
AdminOpsUserAccessKey: "EOE7FYCNOBZJ5VFV909G",
- AdminOpsUserSecretKey: "qmIqpWm8HxCzmynCrD6U6vKWi4hnDBndOnmxXNsV",
+ AdminOpsUserSecretKey: "qmIqpWm8HxCzmynCrD6U6vKWi4hnDBndOnmxXNsV", // notsecret
AdminOpsClient: adminClient,
}, nil
}
diff --git a/pkg/operator/ceph/object/user/controller_test.go b/pkg/operator/ceph/object/user/controller_test.go
index 4094b1a719ac..498e6a503e5a 100644
--- a/pkg/operator/ceph/object/user/controller_test.go
+++ b/pkg/operator/ceph/object/user/controller_test.go
@@ -316,7 +316,7 @@ func TestCephObjectStoreUserController(t *testing.T) {
return &cephobject.AdminOpsContext{
Context: *context,
AdminOpsUserAccessKey: "53S6B9S809NUP19IJ2K3",
- AdminOpsUserSecretKey: "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR",
+ AdminOpsUserSecretKey: "1bXPegzsGClvoGAiJdHQD1uOW2sQBLAZM9j9VtXR", // notsecret
AdminOpsClient: adminClient,
}, nil
}
diff --git a/pkg/operator/k8sutil/node.go b/pkg/operator/k8sutil/node.go
index d9cb27a255c3..0962c26929ad 100644
--- a/pkg/operator/k8sutil/node.go
+++ b/pkg/operator/k8sutil/node.go
@@ -281,12 +281,18 @@ func GetKubernetesNodesMatchingRookNodes(ctx context.Context, rookNodes []cephv1
if err != nil {
return nodes, fmt.Errorf("failed to list kubernetes nodes. %+v", err)
}
- for _, kn := range k8sNodes.Items {
- for _, rn := range rookNodes {
+ for _, rn := range rookNodes {
+ nodeFound := false
+ for _, kn := range k8sNodes.Items {
if rookNodeMatchesKubernetesNode(rn, kn) {
nodes = append(nodes, kn)
+ nodeFound = true
+ break
}
}
+ if !nodeFound {
+ logger.Warningf("failed to find matching kubernetes node for %q. Check the CephCluster's config and confirm each 'name' field in spec.storage.nodes matches the node's 'kubernetes.io/hostname' label", rn.Name)
+ }
}
return nodes, nil
}
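The new warning fires when a CephCluster node entry matches no Kubernetes node; as the message suggests, comparing the spec names against the hostname labels is the quickest check (a hedged example):

```console
kubectl get nodes -L kubernetes.io/hostname
```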