diff --git a/.github/workflows/.yamllint b/.github/workflows/.yamllint
index 44fa3ac3fc2c..80dfe3424fb3 100644
--- a/.github/workflows/.yamllint
+++ b/.github/workflows/.yamllint
@@ -3,6 +3,7 @@ ignore: |
deploy/examples/csi/template
deploy/examples/crds.yaml
deploy/examples/monitoring/
+ deploy/examples/csi-operator.yaml
rules:
line-length: disable
new-lines: disable
diff --git a/.github/workflows/auto-assign.yaml b/.github/workflows/auto-assign.yaml
index 75ac0f86f2ea..13c1910732b8 100644
--- a/.github/workflows/auto-assign.yaml
+++ b/.github/workflows/auto-assign.yaml
@@ -10,7 +10,7 @@ jobs:
assign:
permissions:
# write permissions are needed to assign the issue.
- contents: write
+ issues: write
name: Run self assign job
runs-on: ubuntu-latest
steps:
diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index 2c8b393d25f2..6eb1bce56719 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -13,6 +13,9 @@ defaults:
# reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
shell: bash --noprofile --norc -eo pipefail -x {0}
+permissions:
+ contents: read
+
jobs:
canary:
runs-on: ubuntu-22.04
diff --git a/.github/workflows/codespell.yaml b/.github/workflows/codespell.yaml
index 058cb48c79d9..e83f2c4844dc 100644
--- a/.github/workflows/codespell.yaml
+++ b/.github/workflows/codespell.yaml
@@ -34,7 +34,7 @@ jobs:
# in other places, so ignore the file itself assuming it is correct
# crds.yaml, resources.yaml: CRD files are fully generated from content we control (should
# be flagged elsewhere) and content we don't control (can't fix easily), so ignore
- skip: .git,*.png,*.jpg,*.svg,*.sum,./LICENSE,./deploy/examples/crds.yaml,./deploy/charts/rook-ceph/templates/resources.yaml
+ skip: .git,*.png,*.jpg,*.svg,*.sum,./LICENSE,./deploy/examples/crds.yaml,./deploy/charts/rook-ceph/templates/resources.yaml,./deploy/examples/csi-operator.yaml
# aks: Amazon Kubernetes Service
# keyserver: flag to apt-key
# atleast: codespell wants to flag any 'AtLeast' method
diff --git a/.github/workflows/integration-test-helm-suite.yaml b/.github/workflows/integration-test-helm-suite.yaml
index 515e0c8525bc..b15335b1a6e2 100644
--- a/.github/workflows/integration-test-helm-suite.yaml
+++ b/.github/workflows/integration-test-helm-suite.yaml
@@ -28,7 +28,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/integration-test-keystone-auth-suite.yaml b/.github/workflows/integration-test-keystone-auth-suite.yaml
new file mode 100644
index 000000000000..f13aed197914
--- /dev/null
+++ b/.github/workflows/integration-test-keystone-auth-suite.yaml
@@ -0,0 +1,69 @@
+name: Integration test CephKeystoneAuthSuite
+on:
+ pull_request:
+ branches:
+ - master
+ - release-*
+ paths-ignore:
+ - "Documentation/**"
+ - "design/**"
+
+defaults:
+ run:
+ # reference: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell
+ shell: bash --noprofile --norc -eo pipefail -x {0}
+
+permissions:
+ contents: read
+
+# cancel the in-progress workflow when PR is refreshed.
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
+ cancel-in-progress: true
+
+jobs:
+ TestCephKeystoneAuthSuite:
+ if: ${{ github.event_name == 'pull_request' && github.ref != 'refs/heads/master' && !contains(github.event.pull_request.labels.*.name, 'skip-ci') }}
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
+ steps:
+ - name: checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: consider debugging
+ uses: ./.github/workflows/tmate_debug
+ with:
+ use-tmate: ${{ secrets.USE_TMATE }}
+
+ - name: setup latest cluster resources
+ uses: ./.github/workflows/integration-test-config-latest-k8s
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ kubernetes-version: ${{ matrix.kubernetes-versions }}
+
+ - name: TestCephKeystoneAuthSuite
+ run: |
+ tests/scripts/github-action-helper.sh collect_udev_logs_in_background
+ tests/scripts/helm.sh up
+ export DEVICE_FILTER=$(lsblk|awk '/14G/ || /64G/ {print $1}'| head -1)
+ SKIP_CLEANUP_POLICY=false go test -v -timeout 3600s -failfast -run CephKeystoneAuthSuite github.com/rook/rook/tests/integration
+
+ - name: collect common logs
+ if: always()
+ run: |
+ export LOG_DIR="/home/runner/work/rook/rook/tests/integration/_output/tests/"
+ export CLUSTER_NAMESPACE="keystoneauth-ns"
+ export OPERATOR_NAMESPACE="keystoneauth-ns-system"
+ tests/scripts/collect-logs.sh
+
+ - name: Artifact
+ uses: actions/upload-artifact@v4
+ if: failure()
+ with:
+ name: ceph-keystone-auth-suite-artifact-${{ matrix.kubernetes-versions }}
+ path: /home/runner/work/rook/rook/tests/integration/_output/tests/
diff --git a/.github/workflows/integration-test-mgr-suite.yaml b/.github/workflows/integration-test-mgr-suite.yaml
index ab53093ea095..363e1b2505a5 100644
--- a/.github/workflows/integration-test-mgr-suite.yaml
+++ b/.github/workflows/integration-test-mgr-suite.yaml
@@ -27,7 +27,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.30.0"]
+ kubernetes-versions: ["v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/integration-test-multi-cluster-suite.yaml b/.github/workflows/integration-test-multi-cluster-suite.yaml
index be6bf74a74e3..d5635acc407e 100644
--- a/.github/workflows/integration-test-multi-cluster-suite.yaml
+++ b/.github/workflows/integration-test-multi-cluster-suite.yaml
@@ -28,7 +28,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.30.0"]
+ kubernetes-versions: ["v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/integration-test-object-suite.yaml b/.github/workflows/integration-test-object-suite.yaml
index ac075715823e..923a85f3e9e5 100644
--- a/.github/workflows/integration-test-object-suite.yaml
+++ b/.github/workflows/integration-test-object-suite.yaml
@@ -28,7 +28,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/integration-test-smoke-suite.yaml b/.github/workflows/integration-test-smoke-suite.yaml
index a458ce7b3a6a..4f0a0d6a73ba 100644
--- a/.github/workflows/integration-test-smoke-suite.yaml
+++ b/.github/workflows/integration-test-smoke-suite.yaml
@@ -28,7 +28,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/integration-test-upgrade-suite.yaml b/.github/workflows/integration-test-upgrade-suite.yaml
index 679b9e82d88e..07ce172f4946 100644
--- a/.github/workflows/integration-test-upgrade-suite.yaml
+++ b/.github/workflows/integration-test-upgrade-suite.yaml
@@ -28,7 +28,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
@@ -72,7 +72,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/integration-tests-on-release.yaml b/.github/workflows/integration-tests-on-release.yaml
index 63b3f9bb180e..dfea5dfe3a6d 100644
--- a/.github/workflows/integration-tests-on-release.yaml
+++ b/.github/workflows/integration-tests-on-release.yaml
@@ -21,7 +21,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
@@ -61,7 +61,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
@@ -102,7 +102,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
@@ -140,7 +140,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
@@ -178,7 +178,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.27.13", "v1.28.9", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.28.12", "v1.29.7", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
@@ -219,7 +219,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- kubernetes-versions: ["v1.25.16", "v1.30.0"]
+ kubernetes-versions: ["v1.26.15", "v1.31.0"]
steps:
- name: checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/multus.yaml b/.github/workflows/multus.yaml
index 18955b1f980d..ae93efd16feb 100644
--- a/.github/workflows/multus.yaml
+++ b/.github/workflows/multus.yaml
@@ -11,6 +11,7 @@ on:
- cmd/rook/userfacing/**
- pkg/daemon/multus/**
- .github/workflows/multus.yaml
+ - tests/scripts/multus/**
defaults:
run:
diff --git a/Documentation/CRDs/Cluster/ceph-cluster-crd.md b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
index f4b067195294..af35aa3eccdf 100755
--- a/Documentation/CRDs/Cluster/ceph-cluster-crd.md
+++ b/Documentation/CRDs/Cluster/ceph-cluster-crd.md
@@ -82,6 +82,9 @@ For more details on the mons and when to choose a number other than `3`, see the
* `config`: Config settings applied to all OSDs on the node unless overridden by `devices`. See the [config settings](#osd-configuration-settings) below.
* `allowDeviceClassUpdate`: Whether to allow changing the device class of an OSD after it is created. The default is false
to prevent unintentional data movement or CRUSH changes if the device class is changed accidentally.
+ * `allowOsdCrushWeightUpdate`: Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased.
+ This allows cluster data to be rebalanced to make most effective use of new OSD space.
+ The default is false since data rebalancing can cause temporary cluster slowdown.
* [storage selection settings](#storage-selection-settings)
* [Storage Class Device Sets](#storage-class-device-sets)
* `onlyApplyOSDPlacement`: Whether the placement specific for OSDs is merged with the `all` placement. If `false`, the OSD placement will be merged with the `all` placement. If true, the `OSD placement will be applied` and the `all` placement will be ignored. The placement for OSDs is computed from several different places depending on the type of OSD:
@@ -322,7 +325,7 @@ The following are the settings for Storage Class Device Sets which can be config
* `preparePlacement`: The placement criteria for the preparation of the OSD devices. Creating OSDs is a two-step process and the prepare job may require different placement than the OSD daemons. If the `preparePlacement` is not specified, the `placement` will instead be applied for consistent placement for the OSD prepare jobs and OSD deployments. The `preparePlacement` is only useful for `portable` OSDs in the device sets. OSDs that are not portable will be tied to the host where the OSD prepare job initially runs.
* For example, provisioning may require topology spread constraints across zones, but the OSD daemons may require constraints across hosts within the zones.
* `portable`: If `true`, the OSDs will be allowed to move between nodes during failover. This requires a storage class that supports portability (e.g. `aws-ebs`, but not the local storage provisioner). If `false`, the OSDs will be assigned to a node permanently. Rook will configure Ceph's CRUSH map to support the portability.
-* `tuneDeviceClass`: For example, Ceph cannot detect AWS volumes as HDDs from the storage class "gp2", so you can improve Ceph performance by setting this to true.
+* `tuneDeviceClass`: For example, Ceph cannot detect AWS volumes as HDDs from the storage class "gp2-csi", so you can improve Ceph performance by setting this to true.
* `tuneFastDeviceClass`: For example, Ceph cannot detect Azure disks as SSDs from the storage class "managed-premium", so you can improve Ceph performance by setting this to true..
* `volumeClaimTemplates`: A list of PVC templates to use for provisioning the underlying storage devices.
* `metadata.name`: "data", "metadata", or "wal". If a single template is provided, the name must be "data". If the name is "metadata" or "wal", the devices are used to store the Ceph metadata or WAL respectively. In both cases, the devices must be raw devices or LVM logical volumes.
diff --git a/Documentation/CRDs/Cluster/pvc-cluster.md b/Documentation/CRDs/Cluster/pvc-cluster.md
index 231ff7dcaa11..fc3efe4322e5 100644
--- a/Documentation/CRDs/Cluster/pvc-cluster.md
+++ b/Documentation/CRDs/Cluster/pvc-cluster.md
@@ -8,7 +8,7 @@ in clusters where a local PV provisioner is available.
## AWS Storage Example
-In this example, the mon and OSD volumes are provisioned from the AWS `gp2` storage class. This storage class can be replaced by any storage class that provides `file` mode (for mons) and `block` mode (for OSDs).
+In this example, the mon and OSD volumes are provisioned from the AWS `gp2-csi` storage class. This storage class can be replaced by any storage class that provides `file` mode (for mons) and `block` mode (for OSDs).
```yaml
apiVersion: ceph.rook.io/v1
@@ -25,7 +25,7 @@ spec:
allowMultiplePerNode: false
volumeClaimTemplate:
spec:
- storageClassName: gp2
+ storageClassName: gp2-csi
resources:
requests:
storage: 10Gi
@@ -42,8 +42,8 @@ spec:
resources:
requests:
storage: 10Gi
- # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2)
- storageClassName: gp2
+ # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2-csi)
+ storageClassName: gp2-csi
volumeMode: Block
accessModes:
- ReadWriteOnce
diff --git a/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md b/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
index 0612a5e3b3a6..9ef7ca235e59 100644
--- a/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
+++ b/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md
@@ -65,8 +65,11 @@ spec:
#zone:
#name: zone-a
#hosting:
+ # advertiseEndpoint:
+ # dnsName: "mystore.example.com"
+ # port: 80
+ # useTls: false
# dnsNames:
- # - "mystore.example.com"
# - "mystore.example.org"
```
@@ -93,6 +96,68 @@ When the `zone` section is set pools with the object stores name will not be cre
This is useful for applications that need object store credentials to be created in their own namespace,
where neither OBCs nor COSI is being used to create buckets. The default is empty.
+## Auth Settings
+
+The `auth` section allows the configuration of authentication providers in addition to the regular authentication mechanism.
+
+Currently only OpenStack Keystone is supported.
+
+### Keystone Settings
+
+The keystone authentication can be configured in the `spec.auth.keystone` section of the CRD:
+
+```yaml
+spec:
+ [...]
+ auth:
+ keystone:
+ acceptedRoles:
+ - admin
+ - member
+ - service
+ implicitTenants: "swift"
+ revocationInterval: 1200
+ serviceUserSecretName: usersecret
+ tokenCacheSize: 1000
+ url: https://keystone.example-namespace.svc/
+ protocols:
+ swift:
+ accountInUrl: true
+ urlPrefix: /swift
+ [...]
+```
+
+Note: With this example configuration S3 is implicitly enabled even though it is not enabled in the `protocols` section.
+
+The following options can be configured in the `keystone` section:
+
+* `acceptedRoles`: The OpenStack Keystone [roles](https://docs.openstack.org/keystone/latest/admin/cli-manage-projects-users-and-roles.html#roles-and-role-assignments) accepted by RGW when authenticating against Keystone.
+* `implicitTenants`: Indicates whether to use implicit tenants. This can be `true`, `false`, `swift` and `s3`. For more details see the Ceph RadosGW documentation on [multitenancy](https://docs.ceph.com/en/latest/radosgw/multitenancy/).
+* `revocationInterval`: The number of seconds between token revocation checks.
+* `serviceUserSecretName`: the name of the user secret containing the credentials for the admin user to be used by RGW when communicating with Keystone. See [Object Store with Keystone and Swift](../../Storage-Configuration/Object-Storage-RGW/ceph-object-swift.md) for more details on what the secret must contain.
+* `tokenCacheSize`: specifies the maximum number of entries in each Keystone token cache.
+* `url`: The url of the Keystone API endpoint to use.
+
+The protocols section is divided into two parts:
+
+- a section to configure S3
+- a section to configure swift
+
+#### protocols/S3 settings
+
+In the `s3` section of the `protocols` section the following options can be configured:
+
+* `authUseKeystone`: Whether S3 should also be authenticated using Keystone (`true`) or not (`false`). If set to `false` the default S3 auth will be used.
+* `enabled`: Whether to enable S3 (`true`) or not (`false`). The default is `true` even if the section is not listed at all! Please note that S3 should not be disabled in a [Ceph Multi Site configuration](https://docs.ceph.com/en/quincy/radosgw/multisite).
+
+#### protocols/swift settings
+
+In the `swift` section of the `protocols` section the following options can be configured:
+
+* `accountInUrl`: Whether or not the Swift account name should be included in the Swift API URL. If set to `false` (the default), the Swift API will listen on a URL formed like `http://host:port//v1`. If set to `true`, the Swift API URL will be `http://host:port//v1/AUTH_`. This option must be set to `true` if radosgw should support publicly-readable containers and temporary URLs.
+* `urlPrefix`: The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is `swift`, which makes the Swift API available at the URL `http://host:port/swift/v1` (or `http://host:port/swift/v1/AUTH_%(tenant_id)s` if rgw swift account in url is enabled). "Warning: If you set this option to `/`, the S3 API is automatically disabled. It is not possible to operate radosgw with an urlPrefix of `/` and simultaneously support both the S3 and Swift APIs. [...]" [(see Ceph documentation on swift settings)](https://docs.ceph.com/en/octopus/radosgw/config-ref/#swift-settings).
+* `versioningEnabled`: If set to `true`, enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned.
+
## Gateway Settings
The gateway settings correspond to the RGW daemon settings.
@@ -101,8 +166,7 @@ The gateway settings correspond to the RGW daemon settings.
* `sslCertificateRef`: If specified, this is the name of the Kubernetes secret(`opaque` or `tls`
type) that contains the TLS certificate to be used for secure connections to the object store.
If it is an opaque Kubernetes Secret, Rook will look in the secret provided at the `cert` key name. The value of the `cert` key must be
- in the format expected by the [RGW
- service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb):
+ in the format expected by the [RGW service](https://docs.ceph.com/docs/master/install/ceph-deploy/install-ceph-gateway/#using-ssl-with-civetweb):
"The server key, server certificate, and any other CA or intermediate certificates be supplied in
one file. Each of these items must be in PEM form." They are scenarios where the certificate DNS is set for a particular domain
that does not include the local Kubernetes DNS, namely the object store DNS service endpoint. If
@@ -115,7 +179,10 @@ The gateway settings correspond to the RGW daemon settings.
cluster. Rook will look in the secret provided at the `cabundle` key name.
* `hostNetwork`: Whether host networking is enabled for the rgw daemon. If not set, the network settings from the cluster CR will be applied.
* `port`: The port on which the Object service will be reachable. If host networking is enabled, the RGW daemons will also listen on that port. If running on SDN, the RGW daemon listening port will be 8080 internally.
-* `securePort`: The secure port on which RGW pods will be listening. A TLS certificate must be specified either via `sslCerticateRef` or `service.annotations`
+* `securePort`: The secure port on which RGW pods will be listening. A TLS certificate must be
+  specified either via `sslCertificateRef` or `service.annotations`. Refer to
+ [enabling TLS](../../Storage-Configuration/Object-Storage-RGW/object-storage.md#enabling-tls)
+ documentation for more details.
* `instances`: The number of pods that will be started to load balance this object store.
* `externalRgwEndpoints`: A list of IP addresses to connect to external existing Rados Gateways
(works with external mode). This setting will be ignored if the `CephCluster` does not have
@@ -155,9 +222,30 @@ The [zone](../../Storage-Configuration/Object-Storage-RGW/ceph-object-multisite.
## Hosting Settings
-The hosting settings allow you to host buckets in the object store on a custom DNS name, enabling virtual-hosted-style access to buckets similar to AWS S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html).
-
-* `dnsNames`: a list of DNS names to host buckets on. These names need to valid according RFC-1123. Otherwise it will fail. Each endpoint requires wildcard support like [ingress loadbalancer](https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards). Do not include the wildcard itself in the list of hostnames (e.g., use "mystore.example.com" instead of "*.mystore.example.com"). Add all the hostnames like openshift routes otherwise access will be denied, but if the hostname does not support wild card then virtual host style won't work those hostname. By default cephobjectstore service endpoint and custom endpoints from cephobjectzone is included. The feature is supported only for Ceph v18 and later versions.
+`hosting` settings allow specifying object store endpoint configurations. These settings are only
+supported for Ceph v18 and higher.
+
+A common use case that requires configuring hosting is allowing
+[virtual host-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html)
+bucket access. This use case is discussed in more detail in
+[Rook object storage docs](../../Storage-Configuration/Object-Storage-RGW/object-storage.md#virtual-host-style-bucket-access).
+
+* `advertiseEndpoint`: By default, Rook advertises the most direct connection to RGWs to dependent
+ resources like CephObjectStoreUsers and ObjectBucketClaims. To advertise a different address
+ (e.g., a wildcard-enabled ingress), define the preferred endpoint here. Default behavior is
+ documented in more detail [here](../../Storage-Configuration/Object-Storage-RGW/object-storage.md#object-store-endpoint)
+ * `dnsName`: The valid RFC-1123 (sub)domain name of the endpoint.
+ * `port`: The nonzero port of the endpoint.
+ * `useTls`: Set to true if the endpoint is HTTPS. False if HTTP.
+* `dnsNames`: When this or `advertiseEndpoint` is set, Ceph RGW will reject S3 client connections
+  that attempt to reach the object store via any unspecified DNS name. Add all DNS names that the
+ object store should accept here. These must be valid RFC-1123 (sub)domain names.
+ Rook automatically adds the known CephObjectStore service DNS name to this list, as well as
+ corresponding CephObjectZone `customEndpoints` (if applicable).
+
+!!! Note
+ For DNS names that support wildcards, do not include wildcards.
+ E.g., use `mystore.example.com` instead of `*.mystore.example.com`.
## Runtime settings
diff --git a/Documentation/CRDs/specification.md b/Documentation/CRDs/specification.md
index 944a50dd8b00..301b3c969429 100644
--- a/Documentation/CRDs/specification.md
+++ b/Documentation/CRDs/specification.md
@@ -1847,6 +1847,34 @@ GatewaySpec
+protocols
+
+
+ProtocolSpec
+
+
+ |
+
+(Optional)
+ The protocol specification
+ |
+
+
+
+auth
+
+
+AuthSpec
+
+
+ |
+
+(Optional)
+ The authentication configuration
+ |
+
+
+
zone
@@ -1915,7 +1943,9 @@ ObjectStoreHostingSpec
|
(Optional)
- Hosting settings for the object store
+Hosting settings for the object store.
+A common use case for hosting configuration is to inform Rook of endpoints that support DNS
+wildcards, which in turn allows virtual host-style bucket addressing.
|
@@ -2609,6 +2639,38 @@ CIDRList
AnnotationsSpec is the main spec annotation for all daemons
+AuthSpec
+
+
+(Appears on:ObjectStoreSpec)
+
+
+
AuthSpec represents the authentication protocol configuration of a Ceph Object Store Gateway
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+keystone
+
+
+KeystoneSpec
+
+
+ |
+
+(Optional)
+ The spec for Keystone
+ |
+
+
+
BucketNotificationEvent
(string
alias)
@@ -7068,6 +7130,32 @@ string
+
ImplicitTenantSetting
+(string
alias)
+
+(Appears on:KeystoneSpec)
+
+
+
+
+
+
+Value |
+Description |
+
+
+"" |
+ |
+
"false" |
+ |
+
"s3" |
+ |
+
"swift" |
+ |
+
"true" |
+ |
+
+
KafkaEndpointSpec
@@ -7394,6 +7482,8 @@ string
|
"clusterMetadata" |
|
+
"cmdreporter" |
+ |
"crashcollector" |
|
"dashboard" |
@@ -7418,6 +7508,95 @@ string
|
+KeystoneSpec
+
+
+(Appears on:AuthSpec)
+
+
+
KeystoneSpec represents the Keystone authentication configuration of a Ceph Object Store Gateway
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+url
+
+string
+
+ |
+
+ The URL for the Keystone server.
+ |
+
+
+
+serviceUserSecretName
+
+string
+
+ |
+
+ The name of the secret containing the credentials for the service user account used by RGW. It has to be in the same namespace as the object store resource.
+ |
+
+
+
+acceptedRoles
+
+[]string
+
+ |
+
+ The roles requires to serve requests.
+ |
+
+
+
+implicitTenants
+
+
+ImplicitTenantSetting
+
+
+ |
+
+(Optional)
+ Create new users in their own tenants of the same name. Possible values are true, false, swift and s3. The latter have the effect of splitting the identity space such that only the indicated protocol will use implicit tenants.
+ |
+
+
+
+tokenCacheSize
+
+int
+
+ |
+
+(Optional)
+ The maximum number of entries in each Keystone token cache.
+ |
+
+
+
+revocationInterval
+
+int
+
+ |
+
+(Optional)
+ The number of seconds between token revocation checks.
+ |
+
+
+
Labels
(map[string]string
alias)
@@ -8977,6 +9156,60 @@ and prepares same OSD on that disk
+ObjectEndpointSpec
+
+
+(Appears on:ObjectStoreHostingSpec)
+
+
+
ObjectEndpointSpec represents an object store endpoint
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+dnsName
+
+string
+
+ |
+
+ DnsName is the DNS name (in RFC-1123 format) of the endpoint.
+If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+wildcard itself in the list of hostnames.
+E.g., use “mystore.example.com” instead of “*.mystore.example.com”.
+ |
+
+
+
+port
+
+int32
+
+ |
+
+ Port is the port on which S3 connections can be made for this endpoint.
+ |
+
+
+
+useTls
+
+bool
+
+ |
+
+ UseTls defines whether the endpoint uses TLS (HTTPS) or not (HTTP).
+ |
+
+
+
ObjectEndpoints
@@ -9160,6 +9393,24 @@ bool
+advertiseEndpoint
+
+
+ObjectEndpointSpec
+
+
+ |
+
+(Optional)
+ AdvertiseEndpoint is the default endpoint Rook will return for resources dependent on this
+object store. This endpoint will be returned to CephObjectStoreUsers, Object Bucket Claims,
+and COSI Buckets/Accesses.
+By default, Rook returns the endpoint for the object store’s Kubernetes service using HTTPS
+with gateway.securePort if it is defined (otherwise, HTTP with gateway.port ).
+ |
+
+
+
dnsNames
[]string
@@ -9167,11 +9418,15 @@ bool
|
(Optional)
- A list of DNS names in which bucket can be accessed via virtual host path. These names need to valid according RFC-1123.
-Each domain requires wildcard support like ingress loadbalancer.
-Do not include the wildcard itself in the list of hostnames (e.g. use “mystore.example.com” instead of “*.mystore.example.com”).
-Add all hostnames including user-created Kubernetes Service endpoints to the list.
-CephObjectStore Service Endpoints and CephObjectZone customEndpoints are automatically added to the list.
+ A list of DNS host names on which object store gateways will accept client S3 connections.
+When specified, object store gateways will reject client S3 connections to hostnames that are
+not present in this list, so include all endpoints.
+The object store’s advertiseEndpoint and Kubernetes service endpoint, plus CephObjectZone
+customEndpoints are automatically added to the list but may be set here again if desired.
+Each DNS name must be valid according RFC-1123.
+If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+wildcard itself in the list of hostnames.
+E.g., use “mystore.example.com” instead of “*.mystore.example.com”.
The feature is supported only for Ceph v18 and later versions.
|
@@ -9308,6 +9563,34 @@ GatewaySpec
+protocols
+
+
+ProtocolSpec
+
+
+ |
+
+(Optional)
+ The protocol specification
+ |
+
+
+
+auth
+
+
+AuthSpec
+
+
+ |
+
+(Optional)
+ The authentication configuration
+ |
+
+
+
zone
@@ -9376,7 +9659,9 @@ ObjectStoreHostingSpec
|
(Optional)
- Hosting settings for the object store
+Hosting settings for the object store.
+A common use case for hosting configuration is to inform Rook of endpoints that support DNS
+wildcards, which in turn allows virtual host-style bucket addressing.
|
@@ -10660,6 +10945,52 @@ alive or ready to receive traffic.
+ProtocolSpec
+
+
+(Appears on:ObjectStoreSpec)
+
+
+
ProtocolSpec represents a Ceph Object Store protocol specification
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+s3
+
+
+S3Spec
+
+
+ |
+
+(Optional)
+ The spec for S3
+ |
+
+
+
+swift
+
+
+SwiftSpec
+
+
+ |
+
+(Optional)
+ The spec for Swift
+ |
+
+
+
PullSpec
@@ -11030,6 +11361,48 @@ HybridStorageSpec
ResourceSpec is a collection of ResourceRequirements that describes the compute resource requirements
+S3Spec
+
+
+(Appears on:ProtocolSpec)
+
+
+
S3Spec represents Ceph Object Store specification for the S3 API
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+enabled
+
+bool
+
+ |
+
+(Optional)
+ Whether to enable S3. This defaults to true (even if protocols.s3 is not present in the CRD). This maintains backwards compatibility – by default S3 is enabled.
+ |
+
+
+
+authUseKeystone
+
+bool
+
+ |
+
+(Optional)
+ Whether to use Keystone for authentication. This option maps directly to the rgw_s3_auth_use_keystone option. Enabling it allows generating S3 credentials via an OpenStack API call, see the docs. If not given, the defaults of the corresponding RGW option apply.
+ |
+
+
+
SSSDSidecar
@@ -12230,6 +12603,20 @@ bool
Whether to allow updating the device class after the OSD is initially provisioned
+
+
+allowOsdCrushWeightUpdate
+
+bool
+
+ |
+
+(Optional)
+ Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased.
+This allows cluster data to be rebalanced to make most effective use of new OSD space.
+The default is false since data rebalancing can cause temporary cluster slowdown.
+ |
+
StoreType
@@ -12307,6 +12694,60 @@ string
+SwiftSpec
+
+
+(Appears on:ProtocolSpec)
+
+
+
SwiftSpec represents Ceph Object Store specification for the Swift API
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+accountInUrl
+
+bool
+
+ |
+
+(Optional)
+ Whether or not the Swift account name should be included in the Swift API URL. If set to false (the default), then the Swift API will listen on a URL formed like http://host:port//v1. If set to true, the Swift API URL will be http://host:port//v1/AUTH_. You must set this option to true (and update the Keystone service catalog) if you want radosgw to support publicly-readable containers and temporary URLs.
+ |
+
+
+
+urlPrefix
+
+string
+
+ |
+
+(Optional)
+ The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is swift, which makes the Swift API available at the URL http://host:port/swift/v1 (or http://host:port/swift/v1/AUTH_%(tenant_id)s if rgw swift account in url is enabled).
+ |
+
+
+
+versioningEnabled
+
+bool
+
+ |
+
+(Optional)
+ Enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned.
+ |
+
+
+
TopicEndpointSpec
@@ -12580,7 +13021,7 @@ If the resource referred to by volumeAttributesClass does not exist, this Persis
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
-(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
diff --git a/Documentation/Contributing/development-flow.md b/Documentation/Contributing/development-flow.md
index b3000a03a4e5..9b62dc9adbf5 100644
--- a/Documentation/Contributing/development-flow.md
+++ b/Documentation/Contributing/development-flow.md
@@ -3,7 +3,7 @@ title: Development Flow
---
Thank you for your time and effort to help us improve Rook! Here are a few steps to get started. If you have any questions,
-don't hesitate to reach out to us on our [Slack](https://Rook-io.slack.com) dev channel.
+don't hesitate to reach out to us on our [Slack](https://Rook-io.slack.com) dev channel. Sign up for the Rook Slack [here](https://slack.rook.io).
## Prerequisites
diff --git a/Documentation/Contributing/rook-test-framework.md b/Documentation/Contributing/rook-test-framework.md
index bd8402f6f2bf..bc0d5ace5fb5 100644
--- a/Documentation/Contributing/rook-test-framework.md
+++ b/Documentation/Contributing/rook-test-framework.md
@@ -56,7 +56,7 @@ See [environment.go](/tests/framework/installer/environment.go) for the availabl
Set the following variables:
```console
-export TEST_HELM_PATH=/tmp/rook-tests-scripts-helm/linux-amd64/helm
+export TEST_HELM_PATH=/tmp/rook-tests-scripts-helm/helm
export TEST_BASE_DIR=WORKING_DIR
export TEST_SCRATCH_DEVICE=/dev/vdb
```
@@ -95,7 +95,7 @@ go test -v -timeout 1800s -run CephSmokeSuite github.com/rook/rook/tests/integra
```console
export TEST_ENV_NAME=openshift
- export TEST_STORAGE_CLASS=gp2
+ export TEST_STORAGE_CLASS=gp2-csi
export TEST_BASE_DIR=/tmp
```
diff --git a/Documentation/Getting-Started/Prerequisites/prerequisites.md b/Documentation/Getting-Started/Prerequisites/prerequisites.md
index 7da69e2ad793..587347ebcfa0 100644
--- a/Documentation/Getting-Started/Prerequisites/prerequisites.md
+++ b/Documentation/Getting-Started/Prerequisites/prerequisites.md
@@ -7,7 +7,7 @@ and Rook is granted the required privileges (see below for more information).
## Kubernetes Version
-Kubernetes versions **v1.25** through **v1.30** are supported.
+Kubernetes versions **v1.26** through **v1.31** are supported.
## CPU Architecture
diff --git a/Documentation/Getting-Started/quickstart.md b/Documentation/Getting-Started/quickstart.md
index 4647775b578f..3f0080582212 100644
--- a/Documentation/Getting-Started/quickstart.md
+++ b/Documentation/Getting-Started/quickstart.md
@@ -12,7 +12,7 @@ This guide will walk through the basic setup of a Ceph cluster and enable K8s ap
## Kubernetes Version
-Kubernetes versions **v1.25** through **v1.30** are supported.
+Kubernetes versions **v1.26** through **v1.31** are supported.
## CPU Architecture
@@ -36,7 +36,7 @@ To configure the Ceph storage cluster, at least one of these local storage optio
A simple Rook cluster is created for Kubernetes with the following `kubectl` commands and [example manifests](https://github.com/rook/rook/blob/master/deploy/examples).
```console
-$ git clone --single-branch --branch master https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.15.0 https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
kubectl create -f cluster.yaml
diff --git a/Documentation/Helm-Charts/operator-chart.gotmpl.md b/Documentation/Helm-Charts/operator-chart.gotmpl.md
index e000bcdb5b71..3a045f4094d2 100644
--- a/Documentation/Helm-Charts/operator-chart.gotmpl.md
+++ b/Documentation/Helm-Charts/operator-chart.gotmpl.md
@@ -44,7 +44,7 @@ The following table lists the configurable parameters of the rook-operator chart
{{ template "chart.valuesTable" . }}
-[^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
+[^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage;role=rook-example` or `storage;` (_checks only for presence of key_)
### **Development Build**
diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index 4cb9cc560efd..2a7173c73949 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -54,20 +54,20 @@ The following table lists the configurable parameters of the rook-operator chart
| `crds.enabled` | Whether the helm chart should create and update the CRDs. If false, the CRDs must be managed independently with deploy/examples/crds.yaml. **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED. If the CRDs are deleted in this case, see [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion) to restore them. | `true` |
| `csi.allowUnsupportedVersion` | Allow starting an unsupported ceph-csi image | `false` |
| `csi.attacher.repository` | Kubernetes CSI Attacher image repository | `"registry.k8s.io/sig-storage/csi-attacher"` |
-| `csi.attacher.tag` | Attacher image tag | `"v4.5.1"` |
+| `csi.attacher.tag` | Attacher image tag | `"v4.6.1"` |
| `csi.cephFSAttachRequired` | Whether to skip any attach operation altogether for CephFS PVCs. See more details [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object). If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details. | `true` |
| `csi.cephFSFSGroupPolicy` | Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html | `"File"` |
| `csi.cephFSKernelMountOptions` | Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options. Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR | `nil` |
| `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate | `RollingUpdate` |
| `csi.cephFSPluginUpdateStrategyMaxUnavailable` | A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy. | `1` |
| `csi.cephcsi.repository` | Ceph CSI image repository | `"quay.io/cephcsi/cephcsi"` |
-| `csi.cephcsi.tag` | Ceph CSI image tag | `"v3.11.0"` |
+| `csi.cephcsi.tag` | Ceph CSI image tag | `"v3.12.0"` |
| `csi.cephfsLivenessMetricsPort` | CSI CephFS driver metrics port | `9081` |
| `csi.cephfsPodLabels` | Labels to add to the CSI CephFS Deployments and DaemonSets Pods | `nil` |
| `csi.clusterName` | Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster | `nil` |
| `csi.csiAddons.enabled` | Enable CSIAddons | `false` |
| `csi.csiAddons.repository` | CSIAddons sidecar image repository | `"quay.io/csiaddons/k8s-sidecar"` |
-| `csi.csiAddons.tag` | CSIAddons sidecar image tag | `"v0.8.0"` |
+| `csi.csiAddons.tag` | CSIAddons sidecar image tag | `"v0.9.0"` |
| `csi.csiAddonsPort` | CSI Addons server port | `9070` |
| `csi.csiCephFSPluginResource` | CEPH CSI CephFS plugin resource requirement list | see values.yaml |
| `csi.csiCephFSPluginVolume` | The volume of the CephCSI CephFS plugin DaemonSet | `nil` |
@@ -113,7 +113,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `csi.pluginPriorityClassName` | PriorityClassName to be set on csi driver plugin pods | `"system-node-critical"` |
| `csi.pluginTolerations` | Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet | `nil` |
| `csi.provisioner.repository` | Kubernetes CSI provisioner image repository | `"registry.k8s.io/sig-storage/csi-provisioner"` |
-| `csi.provisioner.tag` | Provisioner image tag | `"v4.0.1"` |
+| `csi.provisioner.tag` | Provisioner image tag | `"v5.0.1"` |
| `csi.provisionerNodeAffinity` | The node labels for affinity of the CSI provisioner deployment [^1] | `nil` |
| `csi.provisionerPriorityClassName` | PriorityClassName to be set on csi driver provisioner pods | `"system-cluster-critical"` |
| `csi.provisionerReplicas` | Set replicas for csi provisioner deployment | `2` |
@@ -125,16 +125,16 @@ The following table lists the configurable parameters of the rook-operator chart
| `csi.rbdPluginUpdateStrategyMaxUnavailable` | A maxUnavailable parameter of CSI RBD plugin daemonset update strategy. | `1` |
| `csi.rbdPodLabels` | Labels to add to the CSI RBD Deployments and DaemonSets Pods | `nil` |
| `csi.registrar.repository` | Kubernetes CSI registrar image repository | `"registry.k8s.io/sig-storage/csi-node-driver-registrar"` |
-| `csi.registrar.tag` | Registrar image tag | `"v2.10.1"` |
+| `csi.registrar.tag` | Registrar image tag | `"v2.11.1"` |
| `csi.resizer.repository` | Kubernetes CSI resizer image repository | `"registry.k8s.io/sig-storage/csi-resizer"` |
-| `csi.resizer.tag` | Resizer image tag | `"v1.10.1"` |
+| `csi.resizer.tag` | Resizer image tag | `"v1.11.1"` |
| `csi.serviceMonitor.enabled` | Enable ServiceMonitor for Ceph CSI drivers | `false` |
| `csi.serviceMonitor.interval` | Service monitor scrape interval | `"10s"` |
| `csi.serviceMonitor.labels` | ServiceMonitor additional labels | `{}` |
| `csi.serviceMonitor.namespace` | Use a different namespace for the ServiceMonitor | `nil` |
| `csi.sidecarLogLevel` | Set logging level for Kubernetes-csi sidecar containers. Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity. | `0` |
| `csi.snapshotter.repository` | Kubernetes CSI snapshotter image repository | `"registry.k8s.io/sig-storage/csi-snapshotter"` |
-| `csi.snapshotter.tag` | Snapshotter image tag | `"v7.0.2"` |
+| `csi.snapshotter.tag` | Snapshotter image tag | `"v8.0.1"` |
| `csi.topology.domainLabels` | domainLabels define which node labels to use as domains for CSI nodeplugins to advertise their domains | `nil` |
| `csi.topology.enabled` | Enable topology based provisioning | `false` |
| `currentNamespaceOnly` | Whether the operator should watch cluster CRD in its own namespace or not | `false` |
@@ -151,7 +151,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `enableOBCWatchOperatorNamespace` | Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used | `true` |
| `hostpathRequiresPrivileged` | Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions. | `false` |
| `image.pullPolicy` | Image pull policy | `"IfNotPresent"` |
-| `image.repository` | Image | `"rook/ceph"` |
+| `image.repository` | Image | `"docker.io/rook/ceph"` |
| `image.tag` | Image tag | `master` |
| `imagePullSecrets` | imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts. | `nil` |
| `logLevel` | Global log level for the operator. Options: `ERROR`, `WARNING`, `INFO`, `DEBUG` | `"INFO"` |
@@ -168,7 +168,7 @@ The following table lists the configurable parameters of the rook-operator chart
| `unreachableNodeTolerationSeconds` | Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override the Kubernetes default of 5 minutes | `5` |
| `useOperatorHostNetwork` | If true, run rook operator on the host network | `nil` |
-[^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
+[^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage;role=rook-example` or `storage;` (_checks only for presence of key_)
### **Development Build**
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
index 3d509bc0a3c7..9b6e986cd36e 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/ceph-csi-drivers.md
@@ -166,9 +166,9 @@ that the controller inspects and forwards to one or more CSI-Addons sidecars for
Deploy the controller by running the following commands:
```console
-kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.8.0/crds.yaml
-kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.8.0/rbac.yaml
-kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.8.0/setup-controller.yaml
+kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.9.0/crds.yaml
+kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.9.0/rbac.yaml
+kubectl create -f https://github.com/csi-addons/kubernetes-csi-addons/releases/download/v0.9.0/setup-controller.yaml
```
This creates the required CRDs and configures permissions.
@@ -196,24 +196,24 @@ Execute the following to enable the CSI-Addons sidecars:
CSI-Addons supports the following operations:
* Reclaim Space
- * [Creating a ReclaimSpaceJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/reclaimspace.md#reclaimspacejob)
- * [Creating a ReclaimSpaceCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/reclaimspace.md#reclaimspacecronjob)
- * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/reclaimspace.md#annotating-perstentvolumeclaims)
- * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/reclaimspace.md#annotating-namespace)
+ * [Creating a ReclaimSpaceJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/reclaimspace.md#reclaimspacejob)
+ * [Creating a ReclaimSpaceCronJob](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/reclaimspace.md#reclaimspacecronjob)
+ * [Annotating PersistentVolumeClaims](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/reclaimspace.md#annotating-perstentvolumeclaims)
+ * [Annotating Namespace](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/reclaimspace.md#annotating-namespace)
* Network Fencing
- * [Creating a NetworkFence](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/networkfence.md)
+ * [Creating a NetworkFence](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/networkfence.md)
* Volume Replication
- * [Creating VolumeReplicationClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/volumereplicationclass.md)
- * [Creating VolumeReplication CR](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.8.0/docs/volumereplication.md)
+ * [Creating VolumeReplicationClass](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/volumereplicationclass.md)
+ * [Creating VolumeReplication CR](https://github.com/csi-addons/kubernetes-csi-addons/blob/v0.9.0/docs/volumereplication.md)
## Enable RBD and CephFS Encryption Support
Ceph-CSI supports encrypting PersistentVolumeClaims (PVCs) for both RBD and CephFS.
This can be achieved using LUKS for RBD and fscrypt for CephFS. More details on encrypting RBD PVCs can be found
-[here](https://github.com/ceph/ceph-csi/blob/v3.11.0/docs/deploy-rbd.md#encryption-for-rbd-volumes),
+[here](https://github.com/ceph/ceph-csi/blob/v3.12.0/docs/deploy-rbd.md#encryption-for-rbd-volumes),
which includes a full list of supported encryption configurations.
-More details on encrypting CephFS PVCs can be found [here](https://github.com/ceph/ceph-csi/blob/v3.11.0/docs/deploy-cephfs.md#cephfs-volume-encryption).
-A sample KMS configmap can be found [here](https://github.com/ceph/ceph-csi/blob/v3.11.0/examples/kms/vault/kms-config.yaml).
+More details on encrypting CephFS PVCs can be found [here](https://github.com/ceph/ceph-csi/blob/v3.12.0/docs/deploy-cephfs.md#cephfs-volume-encryption).
+A sample KMS configmap can be found [here](https://github.com/ceph/ceph-csi/blob/v3.12.0/examples/kms/vault/kms-config.yaml).
!!! note
Not all KMS are compatible with fscrypt. Generally, KMS that either store secrets to use directly (like Vault)
diff --git a/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md b/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
index d4770171e6aa..59c0575c5a9a 100644
--- a/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
+++ b/Documentation/Storage-Configuration/Ceph-CSI/custom-images.md
@@ -18,13 +18,13 @@ kubectl -n $ROOK_OPERATOR_NAMESPACE edit configmap rook-ceph-operator-config
The default upstream images are included below, which you can change to your desired images.
```yaml
-ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.11.0"
-ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1"
-ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v4.0.1"
-ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.5.1"
-ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.10.1"
-ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2"
-ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
+ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.0"
+ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
+ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
+ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
+ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
+ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1"
+ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.0"
```
### **Use private repository**
@@ -32,7 +32,7 @@ ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
If image version is not passed along with the image name in any of the variables above,
Rook will add the corresponding default version to that image.
Example: if `ROOK_CSI_CEPH_IMAGE: "quay.io/private-repo/cephcsi"` is passed,
-Rook will add internal default version and consume it as `"quay.io/private-repo/cephcsi:v3.11.0"`.
+Rook will add internal default version and consume it as `"quay.io/private-repo/cephcsi:v3.12.0"`.
### **Use default images**
diff --git a/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md b/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
index 8781d91da911..8c47fa553849 100644
--- a/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
+++ b/Documentation/Storage-Configuration/Monitoring/ceph-monitoring.md
@@ -48,7 +48,7 @@ There are two sources for metrics collection:
From the root of your locally cloned Rook repo, go the monitoring directory:
```console
-$ git clone --single-branch --branch master https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.15.0 https://github.com/rook/rook.git
cd rook/deploy/examples/monitoring
```
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-swift.md b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-swift.md
new file mode 100644
index 000000000000..89dc0544a65b
--- /dev/null
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-swift.md
@@ -0,0 +1,143 @@
+---
+title: Object Store with Keystone and Swift
+---
+
+!!! note
+ The Object Store with Keystone and Swift is currently in experimental mode.
+
+Ceph RGW can integrate natively with the Swift API and Keystone via the CephObjectStore CRD. This allows native integration of Rook-operated Ceph RGWs into [OpenStack](https://www.openstack.org/) clouds.
+
+!!! note
+ Authentication via the OBC and COSI features is not affected by this configuration.
+
+## Create a Local Object Store with Keystone and Swift
+
+This example will create a `CephObjectStore` that starts the RGW service in the cluster providing a Swift API.
+Using Swift requires the use of [OpenStack Keystone](https://docs.openstack.org/keystone/latest/) as an authentication provider.
+
+The OSDs must be located on different nodes, because the [`failureDomain`](../../CRDs/Block-Storage/ceph-block-pool-crd.md#spec) is set to `host` and the `erasureCoded` chunk settings require at least 3 different OSDs (2 `dataChunks` + 1 `codingChunks`).
+
+More details on the settings available for a `CephObjectStore` (including the `Auth` section) can be found in the [Object Store CRD](../../CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings) document.
+
+Set the url in the auth section to point to the Keystone service URL.
+
+Before using Keystone as an authentication provider, an admin user is required for Rook to access and configure the Keystone admin API.
+
+The user credentials for this admin user are provided by a secret in the same namespace which is referenced via the `serviceUserSecretName` property.
+The secret contains the credentials with names analogous to the environment variables used in an OpenStack `openrc` file.
+
+!!! note
+ This example requires *at least 3 bluestore OSDs*, with each OSD located on a *different node*.
+ This example assumes an existing OpenStack Keystone instance ready to use for authentication.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: usersecret
+data:
+ OS_AUTH_TYPE: cGFzc3dvcmQ=
+ OS_IDENTITY_API_VERSION: Mw==
+ OS_PASSWORD: c2VjcmV0
+ OS_PROJECT_DOMAIN_NAME: RGVmYXVsdA==
+ OS_PROJECT_NAME: YWRtaW4=
+ OS_USER_DOMAIN_NAME: RGVmYXVsdA==
+ OS_USERNAME: YWRtaW4=
+type: Opaque
+```
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+ name: my-store
+ namespace: rook-ceph
+spec:
+ metadataPool:
+ failureDomain: host
+ replicated:
+ size: 3
+ dataPool:
+ failureDomain: host
+ erasureCoded:
+ dataChunks: 2
+ codingChunks: 1
+ auth:
+ keystone:
+ acceptedRoles:
+ - admin
+ - member
+ - service
+ implicitTenants: "swift"
+ revocationInterval: 1200
+ serviceUserSecretName: usersecret
+ tokenCacheSize: 1000
+ url: https://keystone.rook-ceph.svc/
+ protocols:
+ swift:
+ accountInUrl: true
+ urlPrefix: /swift
+ # note that s3 is enabled by default if protocols.s3.enabled is not explicitly set to false
+ preservePoolsOnDelete: true
+ gateway:
+ sslCertificateRef:
+ port: 80
+ # securePort: 443
+ instances: 1
+```
+
+After the `CephObjectStore` is created, the Rook operator will create all the pools and other resources necessary to start the service. This may take a minute to complete.
+
+```console
+kubectl create -f object.yaml
+```
+
+The start of the RGW pod(s) confirms that the object store is configured.
+
+```console
+kubectl -n rook-ceph get pod -l app=rook-ceph-rgw
+```
+
+The Swift service endpoint must be created in OpenStack/Keystone in order to use the object store via Swift, for example with the [OpenStack CLI](https://docs.openstack.org/python-openstackclient/latest/).
+The endpoint url should be set to the service endpoint of the created rgw instance.
+
+```sh
+openstack service create --name swift object-store
+openstack endpoint create --region default --enable swift admin https://rook-ceph-rgw-default.rook-ceph.svc/swift/v1
+openstack endpoint create --region default --enable swift internal https://rook-ceph-rgw-default.rook-ceph.svc/swift/v1
+```
+
+Afterwards any user who has the rights to access the project's resources (as defined in the OpenStack Keystone instance) can access the object store and create containers and objects.
+Here the username and project are explicitly set to reflect use of the (non-admin) user.
+
+```sh
+export OS_USERNAME=alice
+export OS_PROJECT=exampleProject
+openstack container create exampleContainer
+# put /etc/hosts in the newly created container
+openstack object create exampleContainer /etc/hosts
+# retrieve and save the file
+openstack object save --file /tmp/hosts.saved exampleContainer /etc/hosts
+openstack object delete exampleContainer /etc/hosts
+openstack container delete exampleContainer
+```
+
+## Basic concepts
+
+When using Keystone as an authentication provider, Ceph uses the credentials of an admin user (provided in the secret references by `serviceUserSecretName`) to access Keystone.
+
+For each user accessing the object store using Swift, Ceph implicitly creates a user which must be represented in Keystone with an authorized counterpart.
+Keystone checks for a user of the same name. Based on the name and other parameters ((OpenStack Keystone) project, (OpenStack Keystone) role) Keystone allows or disallows access to a Swift container or object. Note that the implicitly created users are created in addition to any users that are created through other means, so Keystone authentication is not exclusive.
+
+It is not necessary to create any users in OpenStack Keystone (except for the admin user provided in the `serviceUserSecretName`).
+
+## Keystone setup
+
+Keystone must support the v3 API version to be used with Rook. Other API versions are not supported.
+
+The admin user and all users accessing the Object store must exist and their authorizations configured accordingly in Keystone.
+
+## Openstack setup
+
+To use the Object Store in OpenStack via Swift, the Swift service must be created and the endpoint URLs for the Swift service configured.
+The example configuration "Create a Local Object Store with Keystone and Swift" above contains more details and the corresponding CLI calls.
diff --git a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
index 87591920aad3..50bfcd176a20 100644
--- a/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
+++ b/Documentation/Storage-Configuration/Object-Storage-RGW/object-storage.md
@@ -2,7 +2,7 @@
title: Object Storage Overview
---
-Object storage exposes an S3 API to the storage cluster for applications to put and get data.
+Object storage exposes an S3 API and/or a [Swift API](https://developer.openstack.org/api-ref/object-store/index.html) to the storage cluster for applications to put and get data.
## Prerequisites
@@ -12,7 +12,7 @@ This guide assumes a Rook cluster as explained in the [Quickstart](../../Getting
Rook can configure the Ceph Object Store for several different scenarios. See each linked section for the configuration details.
-1. Create a [local object store](#create-a-local-object-store) with dedicated Ceph pools. This option is recommended if a single object store is required, and is the simplest to get started.
+1. Create a [local object store](#create-a-local-object-store-with-s3) with dedicated Ceph pools. This option is recommended if a single object store is required, and is the simplest to get started.
2. Create [one or more object stores with shared Ceph pools](#create-local-object-stores-with-shared-pools). This option is recommended when multiple object stores are required.
3. Connect to an [RGW service in an external Ceph cluster](#connect-to-an-external-object-store), rather than create a local object store.
4. Configure [RGW Multisite](#object-multisite) to synchronize buckets between object stores in different clusters.
@@ -20,7 +20,11 @@ Rook can configure the Ceph Object Store for several different scenarios. See ea
!!! note
Updating the configuration of an object store between these types is not supported.
-### Create a Local Object Store
+Rook has the ability to either deploy an object store in Kubernetes or to connect to an external RGW service.
+Most commonly, the object store will be configured in Kubernetes by Rook.
+Alternatively see the [external section](#connect-to-an-external-object-store) to consume an existing Ceph cluster with [Rados Gateways](https://docs.ceph.com/en/quincy/radosgw/index.html) from Rook.
+
+### Create a Local Object Store with S3
The below sample will create a `CephObjectStore` that starts the RGW service in the cluster with an S3 API.
@@ -50,7 +54,7 @@ spec:
codingChunks: 1
preservePoolsOnDelete: true
gateway:
- sslCertificateRef:
+ # sslCertificateRef:
port: 80
# securePort: 443
instances: 1
@@ -159,7 +163,7 @@ spec:
dataPoolName: rgw-data-pool
preserveRadosNamespaceDataOnDelete: true
gateway:
- sslCertificateRef:
+ # sslCertificateRef:
port: 80
instances: 1
```
@@ -200,7 +204,7 @@ Then create a secret with the user credentials:
kubectl -n rook-ceph create secret generic --type="kubernetes.io/rook" rgw-admin-ops-user --from-literal=accessKey= --from-literal=secretKey=
```
-If you have an external `CephCluster` CR, you can instruct Rook to consume external gateways with the following:
+For an external CephCluster, configure Rook to consume external RGW servers with the following:
```yaml
apiVersion: ceph.rook.io/v1
@@ -216,24 +220,35 @@ spec:
# hostname: example.com
```
-Use the existing `object-external.yaml` file. Even though multiple endpoints can be specified, it is recommend to use only one endpoint. This endpoint is randomly added to `configmap` of OBC and secret of the `cephobjectstoreuser`. Rook never guarantees the randomly picked endpoint is a working one or not.
-If there are multiple endpoints, please add load balancer in front of them and use the load balancer endpoint in the `externalRgwEndpoints` list.
+See `object-external.yaml` for a more detailed example.
-When ready, the message in the `cephobjectstore` status similar to this one:
+Even though multiple `externalRgwEndpoints` can be specified, it is best to use a single endpoint.
+Only the first endpoint in the list will be advertised to any consuming resources like
+CephObjectStoreUsers, ObjectBucketClaims, or COSI resources. If there are multiple external RGW
+endpoints, add a load balancer in front of them, then use the single load balancer endpoint in the
+`externalRgwEndpoints` list.
-```console
-kubectl -n rook-ceph get cephobjectstore external-store
-NAME PHASE
-external-store Ready
+## Object store endpoint
-```
+The CephObjectStore resource `status.info` contains `endpoint` (and `secureEndpoint`) fields, which
+report the endpoint that can be used to access the object store as a client. This endpoint is also
+advertised as the default endpoint for CephObjectStoreUsers, ObjectBucketClaims, and
+Container Object Store Interface (COSI) resources.
-Any pod from your cluster can now access this endpoint:
+Each object store also creates a Kubernetes service that can be used as a client endpoint from
+within the Kubernetes cluster. The DNS name of the service is
+`rook-ceph-rgw-<objectStoreName>.<objectStoreNamespace>.svc`. This service DNS name is the default
+`endpoint` (and `secureEndpoint`).
-```console
-$ curl 10.100.28.138:8080
-anonymous
-```
+For [external clusters](#connect-to-an-external-object-store), the default endpoint is the first
+`spec.gateway.externalRgwEndpoint` instead of the service DNS name.
+
+The advertised endpoint can be overridden using `advertiseEndpoint` in the
+[`spec.hosting` config](../../CRDs/Object-Storage/ceph-object-store-crd.md#hosting-settings).
+
+Rook always uses the advertised endpoint to perform management operations against the object store.
+When [TLS is enabled](#enable-tls), the TLS certificate must always specify the endpoint DNS name to
+allow secure management operations.
## Create a Bucket
@@ -490,6 +505,82 @@ kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user -o jsonpa
kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user -o jsonpath='{.data.SecretKey}' | base64 --decode
```
+## Enable TLS
+
+TLS is critical for securing object storage data access, and it is assumed as a default by many S3
+clients. TLS is enabled for CephObjectStores by configuring
+[`gateway` options](../../CRDs/Object-Storage/ceph-object-store-crd.md#gateway-settings).
+Set `securePort`, and give Rook access to a TLS certificate using `sslCertificateRef`.
+`caBundleRef` may be necessary as well to give the deployed gateway (RGW) access to the TLS
+certificate's CA signing bundle.
+
+Ceph RGW only supports a **single** TLS certificate. If the given TLS certificate is a concatenation
+of multiple certificates, only the first certificate will be used by the RGW as the server
+certificate. Therefore, the TLS certificate given must include all endpoints that clients will use
+for access as subject alternate names (SANs).
+
+The [CephObjectStore service endpoint](#object-store-endpoint) must be added as a SAN on the TLS
+certificate. If it is not possible to add the service DNS name as a SAN on the TLS certificate,
+set `hosting.advertiseEndpoint` to a TLS-approved endpoint to help ensure Rook and clients use
+secure data access.
+
+!!! note
+ OpenShift users can add `service.beta.openshift.io/serving-cert-secret-name` as a service
+ annotation instead of using `sslCertificateRef`.
+
+## Virtual host-style Bucket Access
+
+The Ceph Object Gateway supports accessing buckets using
+[virtual host-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html)
+addressing, which allows addressing buckets using the bucket name as a subdomain in the endpoint.
+
+AWS has deprecated the alternative path-style addressing mode, which is Rook and Ceph's default.
+As a result, many end-user applications have begun to remove path-style support entirely. Many
+production clusters will have to enable virtual host-style addressing.
+
+Virtual host-style addressing requires 2 things:
+
+1. An endpoint that supports [wildcard addressing](https://en.wikipedia.org/wiki/Wildcard_DNS_record)
+2. CephObjectStore [hosting](../../CRDs/Object-Storage/ceph-object-store-crd.md#hosting-settings) configuration.
+
+Wildcard addressing can be configured in myriad ways. Some options:
+
+- Kubernetes [ingress loadbalancer](https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards)
+- Openshift [DNS operator](https://docs.openshift.com/container-platform/latest/networking/dns-operator.html)
+
+The minimum recommended `hosting` configuration is exemplified below. It is important to ensure that
+Rook advertises the wildcard-addressable endpoint as a priority over the default. TLS is also
+recommended for security, and the configured TLS certificate should specify the advertise endpoint.
+
+```yaml
+spec:
+ ...
+ hosting:
+ advertiseEndpoint:
+ dnsName: my.wildcard.addressable.endpoint.com
+ port: 443
+ useTls: true
+```
+
+A more complex `hosting` configuration is exemplified below. In this example, two
+wildcard-addressable endpoints are available. One is a wildcard-addressable ingress service that is
+accessible to clients outside of the Kubernetes cluster (`s3.ingress.domain.com`). The other is a
+wildcard-addressable Kubernetes cluster service (`s3.rook-ceph.svc`). The cluster service is the
+preferred advertise endpoint because the internal service avoids the possibility of the ingress
+service's router being a bottleneck for S3 client operations.
+
+```yaml
+spec:
+ ...
+ hosting:
+ advertiseEndpoint:
+ dnsName: s3.rook-ceph.svc
+ port: 443
+ useTls: true
+ dnsNames:
+ - s3.ingress.domain.com
+```
+
## Object Multisite
Multisite is a feature of Ceph that allows object stores to replicate its data over multiple Ceph clusters.
@@ -497,3 +588,10 @@ Multisite is a feature of Ceph that allows object stores to replicate its data o
Multisite also allows object stores to be independent and isolated from other object stores in a cluster.
For more information on multisite please read the [ceph multisite overview](ceph-object-multisite.md) for how to run it.
+
+## Using Swift and Keystone
+
+It is possible to access an object store using the [Swift API](https://developer.openstack.org/api-ref/object-store/index.html).
+Using Swift requires the use of [OpenStack Keystone](https://docs.openstack.org/keystone/latest/) as an authentication provider.
+
+More information on the use of Swift and Keystone can be found in the document on [Object Store with Keystone and Swift](ceph-object-swift.md).
diff --git a/Documentation/Upgrade/ceph-upgrade.md b/Documentation/Upgrade/ceph-upgrade.md
index 291f606fd4b0..0ea0e3a5109c 100644
--- a/Documentation/Upgrade/ceph-upgrade.md
+++ b/Documentation/Upgrade/ceph-upgrade.md
@@ -24,14 +24,11 @@ until all the daemons have been updated.
## Supported Versions
-Rook v1.13 supports the following Ceph versions:
+Rook v1.15 supports the following Ceph versions:
* Ceph Reef v18.2.0 or newer
* Ceph Quincy v17.2.0 or newer
-Support for Ceph Pacific (16.2.x) is removed in Rook v1.13. Upgrade to Quincy or Reef before upgrading
-to Rook v1.13.
-
!!! important
When an update is requested, the operator will check Ceph's status,
**if it is in `HEALTH_ERR` the operator will refuse to proceed with the upgrade.**
diff --git a/Documentation/Upgrade/rook-upgrade.md b/Documentation/Upgrade/rook-upgrade.md
index cd2e7c18ab9c..afdc430edb23 100644
--- a/Documentation/Upgrade/rook-upgrade.md
+++ b/Documentation/Upgrade/rook-upgrade.md
@@ -14,7 +14,7 @@ We welcome feedback and opening issues!
## Supported Versions
-This guide is for upgrading from **Rook v1.13.x to Rook v1.14.x**.
+This guide is for upgrading from **Rook v1.14.x to Rook v1.15.x**.
Please refer to the upgrade guides from previous releases for supported upgrade paths.
Rook upgrades are only supported between official releases.
@@ -22,6 +22,7 @@ Rook upgrades are only supported between official releases.
For a guide to upgrade previous versions of Rook, please refer to the version of documentation for
those releases.
+* [Upgrade 1.13 to 1.14](https://rook.io/docs/rook/v1.14/Upgrade/rook-upgrade/)
* [Upgrade 1.12 to 1.13](https://rook.io/docs/rook/v1.13/Upgrade/rook-upgrade/)
* [Upgrade 1.11 to 1.12](https://rook.io/docs/rook/v1.12/Upgrade/rook-upgrade/)
* [Upgrade 1.10 to 1.11](https://rook.io/docs/rook/v1.11/Upgrade/rook-upgrade/)
@@ -48,21 +49,21 @@ those releases.
official releases. Builds from the master branch can have functionality changed or removed at any
time without compatibility support and without prior notice.
-## Breaking changes in v1.14
+## Breaking changes in v1.15
-* The minimum supported version of Kubernetes is v1.25.
- Upgrade to Kubernetes v1.25 or higher before upgrading Rook.
-* The Rook operator config `CSI_ENABLE_READ_AFFINITY` was removed. v1.13 clusters that have modified
- this value to be `"true"` must set the option as desired in each CephCluster as documented
- [here](https://rook.github.io/docs/rook/v1.14/CRDs/Cluster/ceph-cluster-crd/#csi-driver-options)
- before upgrading to v1.14.
-* Rook is beginning the process of deprecating CSI network "holder" pods.
+* Rook has deprecated CSI network "holder" pods.
If there are pods named `csi-*plugin-holder-*` in the Rook operator namespace, see the
[detailed documentation](../CRDs/Cluster/network-providers.md#holder-pod-deprecation)
- to disable them. This is optional for v1.14, but will be required in a future release.
-* In the operator helm chart, the images for the CSI driver are now specified with separate
- `repository` and `tag` values. If the CSI images have been customized, convert them from the
- `image` value to the separated `repository` and `tag` values.
+ to disable them. This deprecation process is required before upgrading to the future Rook v1.16.
+
+* Ceph COSI driver images have been updated. This impacts existing COSI Buckets, BucketClaims, and
+ BucketAccesses. Update existing clusters following the guide
+ [here](https://github.com/rook/rook/discussions/14297).
+
+* CephObjectStore, CephObjectStoreUser, and OBC endpoint behavior has changed when CephObjectStore
+ `spec.hosting` configurations are set. Use the new `spec.hosting.advertiseEndpoint` config to
+ define required behavior as
+ [documented](../Storage-Configuration/Object-Storage-RGW/object-storage.md#object-store-endpoint).
## Considerations
@@ -79,11 +80,11 @@ With this upgrade guide, there are a few notes to consider:
Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to
another are as simple as updating the common resources and the image of the Rook operator. For
-example, when Rook v1.14.1 is released, the process of updating from v1.14.0 is as simple as running
+example, when Rook v1.15.1 is released, the process of updating from v1.15.0 is as simple as running
the following:
```console
-git clone --single-branch --depth=1 --branch v1.14.1 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.15.1 https://github.com/rook/rook.git
cd rook/deploy/examples
```
@@ -91,11 +92,11 @@ If the Rook Operator or CephCluster are deployed into a different namespace than
`rook-ceph`, see the [Update common resources and CRDs](#1-update-common-resources-and-crds)
section for instructions on how to change the default namespaces in `common.yaml`.
-Then, apply the latest changes from v1.14, and update the Rook Operator image.
+Then, apply the latest changes from v1.15, and update the Rook Operator image.
```console
kubectl apply -f common.yaml -f crds.yaml
-kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.14.1
+kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.15.1
```
As exemplified above, it is a good practice to update Rook common resources from the example
@@ -112,7 +113,7 @@ The upgrade steps in this guide will clarify what Helm handles automatically.
!!! important
If there are pods named `csi-*plugin-holder-*` in the Rook operator namespace, set the new
- config `csi.disableHolderPods: false` in the values.yaml before upgrading to v1.14.
+ config `csi.disableHolderPods: false` in the values.yaml before upgrading to v1.15.
The `rook-ceph` helm chart upgrade performs the Rook upgrade.
The `rook-ceph-cluster` helm chart upgrade performs a [Ceph upgrade](./ceph-upgrade.md) if the Ceph image is updated.
@@ -133,9 +134,9 @@ In order to successfully upgrade a Rook cluster, the following prerequisites mus
## Rook Operator Upgrade
-The examples given in this guide upgrade a live Rook cluster running `v1.13.7` to
-the version `v1.14.0`. This upgrade should work from any official patch release of Rook v1.13 to any
-official patch release of v1.14.
+The examples given in this guide upgrade a live Rook cluster running `v1.14.9` to
+the version `v1.15.0`. This upgrade should work from any official patch release of Rook v1.14 to any
+official patch release of v1.15.
Let's get started!
@@ -161,7 +162,7 @@ by the Operator. Also update the Custom Resource Definitions (CRDs).
Get the latest common resources manifests that contain the latest changes.
```console
-git clone --single-branch --depth=1 --branch master https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.15.0 https://github.com/rook/rook.git
cd rook/deploy/examples
```
@@ -196,11 +197,11 @@ kubectl apply -f deploy/examples/monitoring/rbac.yaml
!!! hint
The operator is automatically updated when using Helm charts.
-The largest portion of the upgrade is triggered when the operator's image is updated to `v1.14.x`.
+The largest portion of the upgrade is triggered when the operator's image is updated to `v1.15.x`.
When the operator is updated, it will proceed to update all of the Ceph daemons.
```console
-kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:master
+kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.15.0
```
### **3. Update Ceph CSI**
@@ -230,18 +231,18 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
```
As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1`
-availability and `rook-version=v1.14.0`, the Ceph cluster's core components are fully updated.
+availability and `rook-version=v1.15.0`, the Ceph cluster's core components are fully updated.
```console
Every 2.0s: kubectl -n rook-ceph get deployment -o j...
-rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.14.0
-rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.14.0
-rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.14.0
-rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.14.0
-rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.14.0
-rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.13.7
-rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.13.7
+rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.15.0
+rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.15.0
+rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.15.0
+rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.15.0
+rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.15.0
+rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.14.9
+rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.14.9
```
An easy check to see if the upgrade is totally finished is to check that there is only one
@@ -250,21 +251,21 @@ An easy check to see if the upgrade is totally finished is to check that there i
```console
# kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
This cluster is not yet finished:
- rook-version=v1.13.7
- rook-version=v1.14.0
+ rook-version=v1.14.9
+ rook-version=v1.15.0
This cluster is finished:
- rook-version=v1.14.0
+ rook-version=v1.15.0
```
### **5. Verify the updated cluster**
-At this point, the Rook operator should be running version `rook/ceph:v1.14.0`.
+At this point, the Rook operator should be running version `rook/ceph:v1.15.0`.
Verify the CephCluster health using the [health verification doc](health-verification.md).
### **6. Disable holder pods**
-Rook is beginning the process of deprecating CSI network "holder" pods. If there are pods named
+Rook has deprecated CSI network "holder" pods. If there are pods named
`csi-*plugin-holder-*` in the Rook operator namespace, see the
[detailed documentation](../CRDs/Cluster/network-providers.md#holder-pod-deprecation)
-to disable them. This is optional for v1.14, but will be required in a future release.
+to disable them. This deprecation process is required before upgrading to the future Rook v1.16.
diff --git a/Makefile b/Makefile
index de860b82fd99..5922c4b14c4a 100644
--- a/Makefile
+++ b/Makefile
@@ -32,7 +32,7 @@ all: build
# Controller-gen version
# f284e2e8... is master ahead of v0.5.0 which has ability to generate embedded objectmeta in CRDs
-CONTROLLER_GEN_VERSION=v0.14.0
+CONTROLLER_GEN_VERSION=v0.16.1
# Set GOBIN
ifeq (,$(shell go env GOBIN))
diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md
index 40d5e84f4e20..2ef38449c60b 100644
--- a/PendingReleaseNotes.md
+++ b/PendingReleaseNotes.md
@@ -2,11 +2,27 @@
## Breaking Changes
-- Updating Ceph COSI driver images, this impact existing COSI `Buckets` and `BucketAccesses`,
-please update the `BucketClass` and `BucketAccessClass` for resolving refer [here](https://github.com/rook/rook/discussions/14297)
-- During CephBlockPool updates, return an error if an invalid device class is specified. Pools with invalid device classes may start failing reconcile until the correct device class is specified. See #14057.
+- Rook has deprecated CSI network "holder" pods.
+ If there are pods named `csi-*plugin-holder-*` in the Rook operator namespace, see the
+ [detailed documentation](../CRDs/Cluster/network-providers.md#holder-pod-deprecation)
+ to disable them. This deprecation process is required before upgrading to the future Rook v1.16.
+- Ceph COSI driver images have been updated. This impacts existing COSI Buckets, BucketClaims, and
+ BucketAccesses. Update existing clusters following the guide
+ [here](https://github.com/rook/rook/discussions/14297).
+- During CephBlockPool updates, Rook will now return an error if an invalid device class is
+ specified. Pools with invalid device classes may start failing until the correct device class is
+ specified. For more info, see [#14057](https://github.com/rook/rook/pull/14057).
+- CephObjectStore, CephObjectStoreUser, and OBC endpoint behavior has changed when CephObjectStore
+ `spec.hosting` configurations are set. Use the new `spec.hosting.advertiseEndpoint` config to
+ define required behavior as
+ [documented](../Storage-Configuration/Object-Storage-RGW/object-storage.md#object-store-endpoint).
+- Minimum version of Kubernetes supported is increased to K8s v1.26.
## Features
- Added support for Ceph Squid (v19)
- Allow updating the device class of OSDs, if `allowDeviceClassUpdate: true` is set
+- CephObjectStore support for keystone authentication for S3 and Swift
+ (see [#9088](https://github.com/rook/rook/issues/9088)).
+- Support K8s versions v1.26 through v1.31.
+- Use fully-qualified image names (`docker.io/rook/ceph`) in operator manifests and helm charts
diff --git a/build/makelib/golang.mk b/build/makelib/golang.mk
index 7b75ea3ba920..e1ae8468e0af 100644
--- a/build/makelib/golang.mk
+++ b/build/makelib/golang.mk
@@ -132,8 +132,8 @@ go.test.unit:
@$(MAKE) $(GOJUNIT)
@echo === go test unit-tests
@mkdir -p $(GO_TEST_OUTPUT)
- CGO_ENABLED=$(CGO_ENABLED_VALUE) $(GOHOST) test -v -cover $(GO_STATIC_FLAGS) $(GO_PACKAGES)
- CGO_ENABLED=$(CGO_ENABLED_VALUE) $(GOHOST) test -v -cover $(GO_TEST_FLAGS) $(GO_STATIC_FLAGS) $(GO_PACKAGES) 2>&1 | tee $(GO_TEST_OUTPUT)/unit-tests.log
+ CGO_ENABLED=$(CGO_ENABLED_VALUE) $(GO) test -v -cover $(GO_STATIC_FLAGS) $(GO_PACKAGES)
+ CGO_ENABLED=$(CGO_ENABLED_VALUE) $(GO) test -v -cover $(GO_TEST_FLAGS) $(GO_STATIC_FLAGS) $(GO_PACKAGES) 2>&1 | tee $(GO_TEST_OUTPUT)/unit-tests.log
@cat $(GO_TEST_OUTPUT)/unit-tests.log | $(GOJUNIT) -set-exit-code > $(GO_TEST_OUTPUT)/unit-tests.xml
.PHONY:
diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml
index 402393adbe60..6b5af7536dc1 100644
--- a/deploy/charts/rook-ceph/templates/clusterrole.yaml
+++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml
@@ -24,6 +24,18 @@ rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["cephconnections"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["clientprofiles"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["operatorconfigs"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["drivers"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
---
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1
@@ -492,6 +504,9 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["csinodes"]
+ verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
@@ -640,9 +655,6 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["csinodes"]
- verbs: ["get", "list", "watch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
diff --git a/deploy/charts/rook-ceph/templates/configmap.yaml b/deploy/charts/rook-ceph/templates/configmap.yaml
index 13c8e96c9235..ea1c5230f107 100644
--- a/deploy/charts/rook-ceph/templates/configmap.yaml
+++ b/deploy/charts/rook-ceph/templates/configmap.yaml
@@ -9,6 +9,9 @@ data:
ROOK_LOG_LEVEL: {{ .Values.logLevel | quote }}
ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: {{ .Values.cephCommandsTimeoutSeconds | quote }}
ROOK_OBC_WATCH_OPERATOR_NAMESPACE: {{ .Values.enableOBCWatchOperatorNamespace | quote }}
+{{- if .Values.operatorMetricsBindAddress }}
+ ROOK_OPERATOR_METRICS_BIND_ADDRESS: {{ .Values.operatorMetricsBindAddress | quote }}
+{{- end }}
{{- if .Values.obcProvisionerNamePrefix }}
ROOK_OBC_PROVISIONER_NAME_PREFIX: {{ .Values.obcProvisionerNamePrefix | quote }}
{{- end }}
diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml
index c57d668face9..568c976430f7 100644
--- a/deploy/charts/rook-ceph/templates/resources.yaml
+++ b/deploy/charts/rook-ceph/templates/resources.yaml
@@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephblockpoolradosnamespaces.ceph.rook.io
spec:
@@ -95,7 +95,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephblockpools.ceph.rook.io
spec:
@@ -527,7 +527,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephbucketnotifications.ceph.rook.io
spec:
@@ -690,7 +690,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephbuckettopics.ceph.rook.io
spec:
@@ -850,7 +850,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephclients.ceph.rook.io
spec:
@@ -934,7 +934,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephclusters.ceph.rook.io
spec:
@@ -1284,11 +1284,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -1432,11 +1432,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -1852,7 +1852,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -2078,7 +2078,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -2311,7 +2311,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -2373,7 +2373,6 @@ spec:
the event) or if no container name is specified "spec.containers[2]" (container with
index 2 in this pod). This syntax is chosen only to have some well-defined way of
referencing a part of an object.
- TODO: this design is not final and this field is subject to change in the future.
type: string
kind:
description: |-
@@ -2444,7 +2443,6 @@ spec:
description: |-
An IPv4 or IPv6 network CIDR.
-
This naive kubebuilder regex provides immediate feedback for some typos and for a common problem
case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code.
pattern: ^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$
@@ -2456,7 +2454,6 @@ spec:
description: |-
An IPv4 or IPv6 network CIDR.
-
This naive kubebuilder regex provides immediate feedback for some typos and for a common problem
case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code.
pattern: ^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$
@@ -2552,15 +2549,12 @@ spec:
networks when the "multus" network provider is used. This config section is not used for
other network providers.
-
Valid keys are "public" and "cluster". Refer to Ceph networking documentation for more:
https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/
-
Refer to Multus network annotation documentation for help selecting values:
https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation
-
Rook will make a best-effort attempt to automatically detect CIDR address ranges for given
network attachment definitions. Rook's methods are robust but may be imprecise for
sufficiently complicated networks. Rook's auto-detection process obtains a new IP address
@@ -2568,7 +2562,6 @@ spec:
partially detects, or if underlying networks do not support reusing old IP addresses, it is
best to use the 'addressRanges' config section to specify CIDR ranges for the Ceph cluster.
-
As a contrived example, one can use a theoretical Kubernetes-wide network for Ceph client
traffic and a theoretical Rook-only network for Ceph replication traffic as shown:
selectors:
@@ -3115,11 +3108,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -3130,6 +3121,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -3208,6 +3205,12 @@ spec:
allowDeviceClassUpdate:
description: Whether to allow updating the device class after the OSD is initially provisioned
type: boolean
+ allowOsdCrushWeightUpdate:
+ description: |-
+ Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased.
+ This allows cluster data to be rebalanced to make most effective use of new OSD space.
+ The default is false since data rebalancing can cause temporary cluster slowdown.
+ type: boolean
backfillFullRatio:
description: BackfillFullRatio is the ratio at which the cluster is too full for backfill. Backfill will be disabled if above this threshold. Default is 0.90.
maximum: 1
@@ -3312,11 +3315,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -3327,6 +3328,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -3574,7 +3581,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -4647,11 +4654,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -4662,6 +4667,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -4916,7 +4927,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -5168,7 +5179,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -5372,7 +5383,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephcosidrivers.ceph.rook.io
spec:
@@ -5941,11 +5952,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -5956,6 +5965,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -6000,7 +6015,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephfilesystemmirrors.ceph.rook.io
spec:
@@ -6578,11 +6593,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -6593,6 +6606,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -6671,7 +6690,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephfilesystems.ceph.rook.io
spec:
@@ -7151,11 +7170,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -7779,11 +7798,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -7794,6 +7811,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -7867,11 +7890,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -8241,7 +8264,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephfilesystemsubvolumegroups.ceph.rook.io
spec:
@@ -8380,7 +8403,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephnfses.ceph.rook.io
spec:
@@ -8447,13 +8470,11 @@ spec:
ConfigFiles defines where the Kerberos configuration should be sourced from. Config files
will be placed into the `/etc/krb5.conf.rook/` directory.
-
If this is left empty, Rook will not add any files. This allows you to manage the files
yourself however you wish. For example, you may build them into your custom Ceph container
image or use the Vault agent injector to securely add the files via annotations on the
CephNFS spec (passed to the NFS server pods).
-
Rook configures Kerberos to log to stderr. We suggest removing logging sections from config
files to avoid consuming unnecessary disk space from logging to files.
properties:
@@ -9255,11 +9276,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -9270,6 +9289,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -9623,11 +9648,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -10254,11 +10279,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -10269,6 +10292,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -10354,7 +10383,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephobjectrealms.ceph.rook.io
spec:
@@ -10445,7 +10474,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephobjectstores.ceph.rook.io
spec:
@@ -10506,6 +10535,41 @@ spec:
items:
type: string
type: array
+ auth:
+ description: The authentication configuration
+ properties:
+ keystone:
+ description: The spec for Keystone
+ nullable: true
+ properties:
+ acceptedRoles:
+ description: The roles requires to serve requests.
+ items:
+ type: string
+ type: array
+ implicitTenants:
+ description: Create new users in their own tenants of the same name. Possible values are true, false, swift and s3. The latter have the effect of splitting the identity space such that only the indicated protocol will use implicit tenants.
+ type: string
+ revocationInterval:
+ description: The number of seconds between token revocation checks.
+ nullable: true
+ type: integer
+ serviceUserSecretName:
+ description: The name of the secret containing the credentials for the service user account used by RGW. It has to be in the same namespace as the object store resource.
+ type: string
+ tokenCacheSize:
+ description: The maximum number of entries in each Keystone token cache.
+ nullable: true
+ type: integer
+ url:
+ description: The URL for the Keystone server.
+ type: string
+ required:
+ - acceptedRoles
+ - serviceUserSecretName
+ - url
+ type: object
+ type: object
dataPool:
description: The data pool settings
nullable: true
@@ -11277,11 +11341,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -11292,6 +11354,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -11394,11 +11462,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -11540,11 +11608,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -11647,15 +11715,54 @@ spec:
type: object
type: object
hosting:
- description: Hosting settings for the object store
+ description: |-
+ Hosting settings for the object store.
+ A common use case for hosting configuration is to inform Rook of endpoints that support DNS
+ wildcards, which in turn allows virtual host-style bucket addressing.
+ nullable: true
properties:
+ advertiseEndpoint:
+ description: |-
+ AdvertiseEndpoint is the default endpoint Rook will return for resources dependent on this
+ object store. This endpoint will be returned to CephObjectStoreUsers, Object Bucket Claims,
+ and COSI Buckets/Accesses.
+ By default, Rook returns the endpoint for the object store's Kubernetes service using HTTPS
+ with `gateway.securePort` if it is defined (otherwise, HTTP with `gateway.port`).
+ nullable: true
+ properties:
+ dnsName:
+ description: |-
+ DnsName is the DNS name (in RFC-1123 format) of the endpoint.
+ If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+ wildcard itself in the list of hostnames.
+ E.g., use "mystore.example.com" instead of "*.mystore.example.com".
+ minLength: 1
+ type: string
+ port:
+ description: Port is the port on which S3 connections can be made for this endpoint.
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
+ useTls:
+ description: UseTls defines whether the endpoint uses TLS (HTTPS) or not (HTTP).
+ type: boolean
+ required:
+ - dnsName
+ - port
+ - useTls
+ type: object
dnsNames:
description: |-
- A list of DNS names in which bucket can be accessed via virtual host path. These names need to valid according RFC-1123.
- Each domain requires wildcard support like ingress loadbalancer.
- Do not include the wildcard itself in the list of hostnames (e.g. use "mystore.example.com" instead of "*.mystore.example.com").
- Add all hostnames including user-created Kubernetes Service endpoints to the list.
- CephObjectStore Service Endpoints and CephObjectZone customEndpoints are automatically added to the list.
+ A list of DNS host names on which object store gateways will accept client S3 connections.
+ When specified, object store gateways will reject client S3 connections to hostnames that are
+ not present in this list, so include all endpoints.
+ The object store's advertiseEndpoint and Kubernetes service endpoint, plus CephObjectZone
+ `customEndpoints` are automatically added to the list but may be set here again if desired.
+ Each DNS name must be valid according RFC-1123.
+ If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+ wildcard itself in the list of hostnames.
+ E.g., use "mystore.example.com" instead of "*.mystore.example.com".
The feature is supported only for Ceph v18 and later versions.
items:
type: string
@@ -11843,6 +11950,40 @@ spec:
preservePoolsOnDelete:
description: Preserve pools on object store deletion
type: boolean
+ protocols:
+ description: The protocol specification
+ properties:
+ s3:
+ description: The spec for S3
+ nullable: true
+ properties:
+ authUseKeystone:
+ description: Whether to use Keystone for authentication. This option maps directly to the rgw_s3_auth_use_keystone option. Enabling it allows generating S3 credentials via an OpenStack API call, see the docs. If not given, the defaults of the corresponding RGW option apply.
+ nullable: true
+ type: boolean
+ enabled:
+ description: Whether to enable S3. This defaults to true (even if protocols.s3 is not present in the CRD). This maintains backwards compatibility – by default S3 is enabled.
+ nullable: true
+ type: boolean
+ type: object
+ swift:
+ description: The spec for Swift
+ nullable: true
+ properties:
+ accountInUrl:
+ description: Whether or not the Swift account name should be included in the Swift API URL. If set to false (the default), then the Swift API will listen on a URL formed like http://host:port//v1. If set to true, the Swift API URL will be http://host:port//v1/AUTH_. You must set this option to true (and update the Keystone service catalog) if you want radosgw to support publicly-readable containers and temporary URLs.
+ nullable: true
+ type: boolean
+ urlPrefix:
+ description: The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is swift, which makes the Swift API available at the URL http://host:port/swift/v1 (or http://host:port/swift/v1/AUTH_%(tenant_id)s if rgw swift account in url is enabled).
+ nullable: true
+ type: string
+ versioningEnabled:
+ description: Enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned.
+ nullable: true
+ type: boolean
+ type: object
+ type: object
security:
description: Security represents security settings
nullable: true
@@ -11991,7 +12132,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephobjectstoreusers.ceph.rook.io
spec:
@@ -12234,7 +12375,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephobjectzonegroups.ceph.rook.io
spec:
@@ -12330,7 +12471,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephobjectzones.ceph.rook.io
spec:
@@ -12383,7 +12524,6 @@ spec:
CephObjectStore associated with this CephObjectStoreZone reachable to peer clusters.
The list can have one or more endpoints pointing to different RGW servers in the zone.
-
If a CephObjectStore endpoint is omitted from this list, that object store's gateways will
not receive multisite replication data
(see CephObjectStore.spec.gateway.disableMultisiteSyncTraffic).
@@ -12830,7 +12970,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
helm.sh/resource-policy: keep
name: cephrbdmirrors.ceph.rook.io
spec:
@@ -13425,11 +13565,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -13440,6 +13578,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index 88b1327b32ff..66480c6de22d 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -4,10 +4,10 @@
image:
# -- Image
- repository: rook/ceph
+ repository: docker.io/rook/ceph
# -- Image tag
# @default -- `master`
- tag: master
+ tag: v1.15.0
# -- Image pull policy
pullPolicy: IfNotPresent
@@ -487,37 +487,37 @@ csi:
# -- Ceph CSI image repository
repository: quay.io/cephcsi/cephcsi
# -- Ceph CSI image tag
- tag: v3.11.0
+ tag: v3.12.0
registrar:
# -- Kubernetes CSI registrar image repository
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
# -- Registrar image tag
- tag: v2.10.1
+ tag: v2.11.1
provisioner:
# -- Kubernetes CSI provisioner image repository
repository: registry.k8s.io/sig-storage/csi-provisioner
# -- Provisioner image tag
- tag: v4.0.1
+ tag: v5.0.1
snapshotter:
# -- Kubernetes CSI snapshotter image repository
repository: registry.k8s.io/sig-storage/csi-snapshotter
# -- Snapshotter image tag
- tag: v7.0.2
+ tag: v8.0.1
attacher:
# -- Kubernetes CSI Attacher image repository
repository: registry.k8s.io/sig-storage/csi-attacher
# -- Attacher image tag
- tag: v4.5.1
+ tag: v4.6.1
resizer:
# -- Kubernetes CSI resizer image repository
repository: registry.k8s.io/sig-storage/csi-resizer
# -- Resizer image tag
- tag: v1.10.1
+ tag: v1.11.1
# -- Image pull policy
imagePullPolicy: IfNotPresent
@@ -537,7 +537,7 @@ csi:
# -- CSIAddons sidecar image repository
repository: quay.io/csiaddons/k8s-sidecar
# -- CSIAddons sidecar image tag
- tag: v0.8.0
+ tag: v0.9.0
nfs:
# -- Enable the nfs csi driver
diff --git a/deploy/examples/cluster-on-pvc.yaml b/deploy/examples/cluster-on-pvc.yaml
index 2087394aa3cc..01017800ce48 100644
--- a/deploy/examples/cluster-on-pvc.yaml
+++ b/deploy/examples/cluster-on-pvc.yaml
@@ -28,7 +28,7 @@ spec:
# size appropriate for monitor data will be used.
volumeClaimTemplate:
spec:
- storageClassName: gp2
+ storageClassName: gp2-csi
resources:
requests:
storage: 10Gi
@@ -54,6 +54,7 @@ spec:
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
storage:
allowDeviceClassUpdate: false # whether to allow changing the device class of an OSD after it is created
+ allowOsdCrushWeightUpdate: true # whether to allow resizing the OSD crush weight after osd pvc is increased
storageClassDeviceSets:
- name: set1
# The number of OSDs to create from this device set
@@ -64,7 +65,7 @@ spec:
portable: true
# Certain storage class in the Cloud are slow
# Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal
- # Currently, "gp2" has been identified as such
+ # Currently, "gp2-csi" has been identified as such
tuneDeviceClass: true
# Certain storage class in the Cloud are fast
# Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal
@@ -132,7 +133,7 @@ spec:
requests:
storage: 10Gi
# IMPORTANT: Change the storage class depending on your environment
- storageClassName: gp2
+ storageClassName: gp2-csi
volumeMode: Block
accessModes:
- ReadWriteOnce
diff --git a/deploy/examples/cluster-stretched-aws.yaml b/deploy/examples/cluster-stretched-aws.yaml
index bf82efd38165..c286e7dcc943 100644
--- a/deploy/examples/cluster-stretched-aws.yaml
+++ b/deploy/examples/cluster-stretched-aws.yaml
@@ -37,7 +37,7 @@ spec:
- name: us-east-2c
volumeClaimTemplate:
spec:
- storageClassName: gp2
+ storageClassName: gp2-csi
resources:
requests:
storage: 10Gi
@@ -85,7 +85,7 @@ spec:
resources:
requests:
storage: 10Gi
- storageClassName: gp2
+ storageClassName: gp2-csi
volumeMode: Block
accessModes:
- ReadWriteOnce
@@ -118,7 +118,7 @@ spec:
resources:
requests:
storage: 10Gi
- storageClassName: gp2
+ storageClassName: gp2-csi
volumeMode: Block
accessModes:
- ReadWriteOnce
diff --git a/deploy/examples/cluster-test.yaml b/deploy/examples/cluster-test.yaml
index 7d33a8e93f3d..bde8e182e82b 100644
--- a/deploy/examples/cluster-test.yaml
+++ b/deploy/examples/cluster-test.yaml
@@ -34,6 +34,7 @@ spec:
useAllNodes: true
useAllDevices: true
allowDeviceClassUpdate: true
+ allowOsdCrushWeightUpdate: false
#deviceFilter:
#config:
# deviceClass: testclass
diff --git a/deploy/examples/cluster.yaml b/deploy/examples/cluster.yaml
index 6e9619774ed9..11860340376e 100644
--- a/deploy/examples/cluster.yaml
+++ b/deploy/examples/cluster.yaml
@@ -194,9 +194,14 @@ spec:
annotations:
# all:
# mon:
+ # mgr:
# osd:
+ # exporter:
+ # crashcollector:
# cleanup:
# prepareosd:
+ # cmdreporter is for jobs to detect ceph and csi versions, and check network status
+ # cmdreporter:
# clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets.
# And clusterMetadata annotations will not be merged with `all` annotations.
# clusterMetadata:
@@ -257,6 +262,7 @@ spec:
# encryptedDevice: "true" # the default value for this option is "false"
# deviceClass: "myclass" # specify a device class for OSDs in the cluster
allowDeviceClassUpdate: false # whether to allow changing the device class of an OSD after it is created
+ allowOsdCrushWeightUpdate: false # whether to allow resizing the OSD crush weight after osd pvc is increased
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml
index 62923f36f1fb..bb7703389f8c 100644
--- a/deploy/examples/common.yaml
+++ b/deploy/examples/common.yaml
@@ -46,6 +46,9 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["csinodes"]
+ verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
@@ -213,9 +216,6 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["csinodes"]
- verbs: ["get", "list", "watch"]
---
# The cluster role for managing all the cluster-specific resources in a namespace
apiVersion: rbac.authorization.k8s.io/v1
@@ -606,6 +606,18 @@ rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["cephconnections"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["clientprofiles"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["operatorconfigs"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
+ - apiGroups: ["csi.ceph.io"]
+ resources: ["drivers"]
+ verbs: ["create", "delete", "get", "list", "update", "watch"]
---
# This is required by operator-sdk to map the cluster/clusterrolebindings with SA
# otherwise operator-sdk will create a individual file for these.
diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml
index 0aa0dba90b8f..03d4288cdbb6 100644
--- a/deploy/examples/crds.yaml
+++ b/deploy/examples/crds.yaml
@@ -8,7 +8,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephblockpoolradosnamespaces.ceph.rook.io
spec:
group: ceph.rook.io
@@ -98,7 +98,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephblockpools.ceph.rook.io
spec:
group: ceph.rook.io
@@ -529,7 +529,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephbucketnotifications.ceph.rook.io
spec:
group: ceph.rook.io
@@ -691,7 +691,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephbuckettopics.ceph.rook.io
spec:
group: ceph.rook.io
@@ -850,7 +850,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephclients.ceph.rook.io
spec:
group: ceph.rook.io
@@ -933,7 +933,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephclusters.ceph.rook.io
spec:
group: ceph.rook.io
@@ -1282,11 +1282,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -1430,11 +1430,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -1850,7 +1850,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -2076,7 +2076,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -2309,7 +2309,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -2371,7 +2371,6 @@ spec:
the event) or if no container name is specified "spec.containers[2]" (container with
index 2 in this pod). This syntax is chosen only to have some well-defined way of
referencing a part of an object.
- TODO: this design is not final and this field is subject to change in the future.
type: string
kind:
description: |-
@@ -2442,7 +2441,6 @@ spec:
description: |-
An IPv4 or IPv6 network CIDR.
-
This naive kubebuilder regex provides immediate feedback for some typos and for a common problem
case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code.
pattern: ^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$
@@ -2454,7 +2452,6 @@ spec:
description: |-
An IPv4 or IPv6 network CIDR.
-
This naive kubebuilder regex provides immediate feedback for some typos and for a common problem
case where the range spec is forgotten (e.g., /24). Rook does in-depth validation in code.
pattern: ^[0-9a-fA-F:.]{2,}\/[0-9]{1,3}$
@@ -2550,15 +2547,12 @@ spec:
networks when the "multus" network provider is used. This config section is not used for
other network providers.
-
Valid keys are "public" and "cluster". Refer to Ceph networking documentation for more:
https://docs.ceph.com/en/reef/rados/configuration/network-config-ref/
-
Refer to Multus network annotation documentation for help selecting values:
https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/how-to-use.md#run-pod-with-network-annotation
-
Rook will make a best-effort attempt to automatically detect CIDR address ranges for given
network attachment definitions. Rook's methods are robust but may be imprecise for
sufficiently complicated networks. Rook's auto-detection process obtains a new IP address
@@ -2566,7 +2560,6 @@ spec:
partially detects, or if underlying networks do not support reusing old IP addresses, it is
best to use the 'addressRanges' config section to specify CIDR ranges for the Ceph cluster.
-
As a contrived example, one can use a theoretical Kubernetes-wide network for Ceph client
traffic and a theoretical Rook-only network for Ceph replication traffic as shown:
selectors:
@@ -3113,11 +3106,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -3128,6 +3119,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -3206,6 +3203,12 @@ spec:
allowDeviceClassUpdate:
description: Whether to allow updating the device class after the OSD is initially provisioned
type: boolean
+ allowOsdCrushWeightUpdate:
+ description: |-
+ Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased.
+ This allows cluster data to be rebalanced to make most effective use of new OSD space.
+ The default is false since data rebalancing can cause temporary cluster slowdown.
+ type: boolean
backfillFullRatio:
description: BackfillFullRatio is the ratio at which the cluster is too full for backfill. Backfill will be disabled if above this threshold. Default is 0.90.
maximum: 1
@@ -3310,11 +3313,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -3325,6 +3326,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -3572,7 +3579,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -4645,11 +4652,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -4660,6 +4665,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -4914,7 +4925,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -5166,7 +5177,7 @@ spec:
set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
- (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -5370,7 +5381,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephcosidrivers.ceph.rook.io
spec:
group: ceph.rook.io
@@ -5938,11 +5949,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -5953,6 +5962,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -5997,7 +6012,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephfilesystemmirrors.ceph.rook.io
spec:
group: ceph.rook.io
@@ -6574,11 +6589,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -6589,6 +6602,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -6667,7 +6686,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephfilesystems.ceph.rook.io
spec:
group: ceph.rook.io
@@ -7146,11 +7165,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -7774,11 +7793,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -7789,6 +7806,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -7862,11 +7885,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -8236,7 +8259,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephfilesystemsubvolumegroups.ceph.rook.io
spec:
group: ceph.rook.io
@@ -8374,7 +8397,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephnfses.ceph.rook.io
spec:
group: ceph.rook.io
@@ -8440,13 +8463,11 @@ spec:
ConfigFiles defines where the Kerberos configuration should be sourced from. Config files
will be placed into the `/etc/krb5.conf.rook/` directory.
-
If this is left empty, Rook will not add any files. This allows you to manage the files
yourself however you wish. For example, you may build them into your custom Ceph container
image or use the Vault agent injector to securely add the files via annotations on the
CephNFS spec (passed to the NFS server pods).
-
Rook configures Kerberos to log to stderr. We suggest removing logging sections from config
files to avoid consuming unnecessary disk space from logging to files.
properties:
@@ -9248,11 +9269,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -9263,6 +9282,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -9616,11 +9641,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -10247,11 +10272,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -10262,6 +10285,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -10347,7 +10376,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephobjectrealms.ceph.rook.io
spec:
group: ceph.rook.io
@@ -10437,7 +10466,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephobjectstores.ceph.rook.io
spec:
group: ceph.rook.io
@@ -10497,6 +10526,41 @@ spec:
items:
type: string
type: array
+ auth:
+ description: The authentication configuration
+ properties:
+ keystone:
+ description: The spec for Keystone
+ nullable: true
+ properties:
+ acceptedRoles:
+ description: The roles requires to serve requests.
+ items:
+ type: string
+ type: array
+ implicitTenants:
+ description: Create new users in their own tenants of the same name. Possible values are true, false, swift and s3. The latter have the effect of splitting the identity space such that only the indicated protocol will use implicit tenants.
+ type: string
+ revocationInterval:
+ description: The number of seconds between token revocation checks.
+ nullable: true
+ type: integer
+ serviceUserSecretName:
+ description: The name of the secret containing the credentials for the service user account used by RGW. It has to be in the same namespace as the object store resource.
+ type: string
+ tokenCacheSize:
+ description: The maximum number of entries in each Keystone token cache.
+ nullable: true
+ type: integer
+ url:
+ description: The URL for the Keystone server.
+ type: string
+ required:
+ - acceptedRoles
+ - serviceUserSecretName
+ - url
+ type: object
+ type: object
dataPool:
description: The data pool settings
nullable: true
@@ -11268,11 +11332,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -11283,6 +11345,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
@@ -11385,11 +11453,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -11531,11 +11599,11 @@ spec:
format: int32
type: integer
service:
+ default: ""
description: |-
Service is the name of the service to place in the gRPC HealthCheckRequest
(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-
If this is not specified, the default behavior is defined by gRPC.
type: string
required:
@@ -11638,15 +11706,54 @@ spec:
type: object
type: object
hosting:
- description: Hosting settings for the object store
+ description: |-
+ Hosting settings for the object store.
+ A common use case for hosting configuration is to inform Rook of endpoints that support DNS
+ wildcards, which in turn allows virtual host-style bucket addressing.
+ nullable: true
properties:
+ advertiseEndpoint:
+ description: |-
+ AdvertiseEndpoint is the default endpoint Rook will return for resources dependent on this
+ object store. This endpoint will be returned to CephObjectStoreUsers, Object Bucket Claims,
+ and COSI Buckets/Accesses.
+ By default, Rook returns the endpoint for the object store's Kubernetes service using HTTPS
+ with `gateway.securePort` if it is defined (otherwise, HTTP with `gateway.port`).
+ nullable: true
+ properties:
+ dnsName:
+ description: |-
+ DnsName is the DNS name (in RFC-1123 format) of the endpoint.
+ If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+ wildcard itself in the list of hostnames.
+ E.g., use "mystore.example.com" instead of "*.mystore.example.com".
+ minLength: 1
+ type: string
+ port:
+ description: Port is the port on which S3 connections can be made for this endpoint.
+ format: int32
+ maximum: 65535
+ minimum: 1
+ type: integer
+ useTls:
+ description: UseTls defines whether the endpoint uses TLS (HTTPS) or not (HTTP).
+ type: boolean
+ required:
+ - dnsName
+ - port
+ - useTls
+ type: object
dnsNames:
description: |-
- A list of DNS names in which bucket can be accessed via virtual host path. These names need to valid according RFC-1123.
- Each domain requires wildcard support like ingress loadbalancer.
- Do not include the wildcard itself in the list of hostnames (e.g. use "mystore.example.com" instead of "*.mystore.example.com").
- Add all hostnames including user-created Kubernetes Service endpoints to the list.
- CephObjectStore Service Endpoints and CephObjectZone customEndpoints are automatically added to the list.
+ A list of DNS host names on which object store gateways will accept client S3 connections.
+ When specified, object store gateways will reject client S3 connections to hostnames that are
+ not present in this list, so include all endpoints.
+ The object store's advertiseEndpoint and Kubernetes service endpoint, plus CephObjectZone
+ `customEndpoints` are automatically added to the list but may be set here again if desired.
+ Each DNS name must be valid according RFC-1123.
+ If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+ wildcard itself in the list of hostnames.
+ E.g., use "mystore.example.com" instead of "*.mystore.example.com".
The feature is supported only for Ceph v18 and later versions.
items:
type: string
@@ -11834,6 +11941,40 @@ spec:
preservePoolsOnDelete:
description: Preserve pools on object store deletion
type: boolean
+ protocols:
+ description: The protocol specification
+ properties:
+ s3:
+ description: The spec for S3
+ nullable: true
+ properties:
+ authUseKeystone:
+ description: Whether to use Keystone for authentication. This option maps directly to the rgw_s3_auth_use_keystone option. Enabling it allows generating S3 credentials via an OpenStack API call, see the docs. If not given, the defaults of the corresponding RGW option apply.
+ nullable: true
+ type: boolean
+ enabled:
+ description: Whether to enable S3. This defaults to true (even if protocols.s3 is not present in the CRD). This maintains backwards compatibility – by default S3 is enabled.
+ nullable: true
+ type: boolean
+ type: object
+ swift:
+ description: The spec for Swift
+ nullable: true
+ properties:
+ accountInUrl:
+ description: Whether or not the Swift account name should be included in the Swift API URL. If set to false (the default), then the Swift API will listen on a URL formed like http://host:port//v1. If set to true, the Swift API URL will be http://host:port//v1/AUTH_. You must set this option to true (and update the Keystone service catalog) if you want radosgw to support publicly-readable containers and temporary URLs.
+ nullable: true
+ type: boolean
+ urlPrefix:
+ description: The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is swift, which makes the Swift API available at the URL http://host:port/swift/v1 (or http://host:port/swift/v1/AUTH_%(tenant_id)s if rgw swift account in url is enabled).
+ nullable: true
+ type: string
+ versioningEnabled:
+ description: Enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned.
+ nullable: true
+ type: boolean
+ type: object
+ type: object
security:
description: Security represents security settings
nullable: true
@@ -11982,7 +12123,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephobjectstoreusers.ceph.rook.io
spec:
group: ceph.rook.io
@@ -12224,7 +12365,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephobjectzonegroups.ceph.rook.io
spec:
group: ceph.rook.io
@@ -12319,7 +12460,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephobjectzones.ceph.rook.io
spec:
group: ceph.rook.io
@@ -12371,7 +12512,6 @@ spec:
CephObjectStore associated with this CephObjectStoreZone reachable to peer clusters.
The list can have one or more endpoints pointing to different RGW servers in the zone.
-
If a CephObjectStore endpoint is omitted from this list, that object store's gateways will
not receive multisite replication data
(see CephObjectStore.spec.gateway.disableMultisiteSyncTraffic).
@@ -12818,7 +12958,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.14.0
+ controller-gen.kubebuilder.io/version: v0.16.1
name: cephrbdmirrors.ceph.rook.io
spec:
group: ceph.rook.io
@@ -13412,11 +13552,9 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
-
This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.
-
This field is immutable. It can only be set for containers.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
@@ -13427,6 +13565,12 @@ spec:
the Pod where this field is used. It makes that resource available
inside a container.
type: string
+ request:
+ description: |-
+ Request is the name chosen for a request in the referenced claim.
+ If empty, everything from the claim is made available, otherwise
+ only the result of this request.
+ type: string
required:
- name
type: object
diff --git a/deploy/examples/csi-operator.yaml b/deploy/examples/csi-operator.yaml
new file mode 100644
index 000000000000..5eaaf127ccd6
--- /dev/null
+++ b/deploy/examples/csi-operator.yaml
@@ -0,0 +1,15835 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: cephconnections.csi.ceph.io
+spec:
+ group: csi.ceph.io
+ names:
+ kind: CephConnection
+ listKind: CephConnectionList
+ plural: cephconnections
+ singular: cephconnection
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: CephConnection is the Schema for the cephconnections API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CephConnectionSpec defines the desired state of CephConnection
+ properties:
+ monitors:
+ items:
+ type: string
+ minItems: 1
+ type: array
+ rbdMirrorDaemonCount:
+ minimum: 1
+ type: integer
+ readAffinity:
+ description: ReadAffinitySpec capture Ceph CSI read affinity settings
+ properties:
+ crushLocationLabels:
+ items:
+ type: string
+ minItems: 1
+ type: array
+ type: object
+ required:
+ - monitors
+ type: object
+ status:
+ description: CephConnectionStatus defines the observed state of CephConnection
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: clientprofilemappings.csi.ceph.io
+spec:
+ group: csi.ceph.io
+ names:
+ kind: ClientProfileMapping
+ listKind: ClientProfileMappingList
+ plural: clientprofilemappings
+ singular: clientprofilemapping
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description:
+ ClientProfileMapping is the Schema for the clientprofilemappings
+ API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ClientProfileMappingSpec defines the desired state of ClientProfileMapping
+ properties:
+ mappings:
+ items:
+ description:
+ MappingsSpec define a mapping between a local and remote
+ profiles
+ properties:
+ blockPoolIdMapping:
+ items:
+ items:
+ type: string
+ maxItems: 2
+ minItems: 2
+ type: array
+ type: array
+ localClientProfile:
+ type: string
+ remoteClientProfile:
+ type: string
+ type: object
+ type: array
+ type: object
+ status:
+ description:
+ ClientProfileMappingStatus defines the observed state of
+ ClientProfileMapping
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: clientprofiles.csi.ceph.io
+spec:
+ group: csi.ceph.io
+ names:
+ kind: ClientProfile
+ listKind: ClientProfileList
+ plural: clientprofiles
+ singular: clientprofile
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: ClientProfile is the Schema for the clientprofiles API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ ClientProfileSpec defines the desired state of Ceph CSI
+ configuration for volumes and snapshots configured to use
+ this profile
+ properties:
+ cephConnectionRef:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: "'.name' cannot be empty"
+ rule: self.name != ""
+ cephFs:
+ description: CephFsConfigSpec defines the desired CephFs configuration
+ properties:
+ fuseMountOptions:
+ additionalProperties:
+ type: string
+ type: object
+ kernelMountOptions:
+ additionalProperties:
+ type: string
+ type: object
+ subVolumeGroup:
+ type: string
+ type: object
+ nfs:
+ description: NfsConfigSpec cdefines the desired NFS configuration
+ type: object
+ rbd:
+ description: RbdConfigSpec defines the desired RBD configuration
+ properties:
+ radosNamespace:
+ type: string
+ type: object
+ required:
+ - cephConnectionRef
+ type: object
+ status:
+ description: |-
+ ClientProfileStatus defines the observed state of Ceph CSI
+ configuration for volumes and snapshots configured to use
+ this profile
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: drivers.csi.ceph.io
+spec:
+ group: csi.ceph.io
+ names:
+ kind: Driver
+ listKind: DriverList
+ plural: drivers
+ singular: driver
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: Driver is the Schema for the drivers API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: DriverSpec defines the desired state of Driver
+ properties:
+ attachRequired:
+ description: |-
+ Whether to skip any attach operation altogether for CephCsi PVCs.
+ See more details [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ If set to false it skips the volume attachments and makes the creation of pods using the CephCsi PVC fast.
+ **WARNING** It's highly discouraged to use this for RWO volumes. for RBD PVC it can cause data corruption,
+ csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set to false
+ since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+ Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ type: boolean
+ cephFsClientType:
+ description: |-
+ Select between between cephfs kernel driver and ceph-fuse
+ If you select a non-kernel client, your application may be disrupted during upgrade.
+ See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
+ NOTE! cephfs quota is not supported in kernel version < 4.17
+ enum:
+ - autodetect
+ - kernel
+ type: string
+ clusterName:
+ description: |-
+ Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases
+ when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
+ type: string
+ controllerPlugin:
+ description: Driver's controller plugin configuration
+ properties:
+ affinity:
+ description: Pod's affinity settings
+ properties:
+ nodeAffinity:
+ description:
+ Describes node affinity scheduling rules for
+ the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description:
+ A node selector term, associated with
+ the corresponding weight.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description:
+ Weight associated with matching the
+ corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description:
+ Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description:
+ Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description:
+ Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone, etc.
+ as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ description: Pod's annotations
+ type: object
+ imagePullPolicy:
+ description:
+ To indicate the image pull policy to be applied to
+ all the containers in the csi driver pods.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Pod's labels
+ type: object
+ priorityClassName:
+ description: Pod's user defined priority class name
+ type: string
+ privileged:
+ description: |-
+ To enable logrotation for csi pods,
+ Some platforms require controller plugin to run privileged,
+ For example, OpenShift with SELinux restrictions requires the pod to be privileged to write to hostPath.
+ type: boolean
+ replicas:
+ description:
+ Set replicas for controller plugin's deployment.
+ Defaults to 2
+ format: int32
+ minimum: 1
+ type: integer
+ resources:
+ description: Resource requirements for controller plugin's containers
+ properties:
+ addons:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ attacher:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ liveness:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ logRotator:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ omapGenerator:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ plugin:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ provisioner:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ resizer:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ snapshotter:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ type: object
+ serviceAccountName:
+ description: Service account name to be used for driver's pods
+ type: string
+ tolerations:
+ description: Pod's tolerations list
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple using the matching operator .
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumes:
+ description:
+ Volume and volume mount definitions to attach to
+ the pod
+ items:
+ properties:
+ mount:
+ description:
+ VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ volume:
+ description:
+ Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description:
+ azureDisk represents an Azure Data Disk
+ mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description:
+ "cachingMode is the Host Caching mode:
+ None, Read Only, Read Write."
+ type: string
+ diskName:
+ description:
+ diskName is the Name of the data disk
+ in the blob storage
+ type: string
+ diskURI:
+ description:
+ diskURI is the URI of data disk in
+ the blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description:
+ "kind expected values are Shared: multiple
+ blob disks per storage account Dedicated: single
+ blob disk per storage account Managed: azure
+ managed data disk (only in managed availability
+ set). defaults to shared"
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description:
+ azureFile represents an Azure File Service
+ mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description:
+ secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description:
+ cephFS represents a Ceph FS mount on the
+ host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description:
+ "path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default
+ is /"
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description:
+ configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description:
+ csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description:
+ downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+                              Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description:
+ Items is a list of downward API volume
+ file
+ items:
+ description:
+ DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the '..' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container name: required
+ for volumes, optional for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: "Required: resource to select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+                        pod. The name of the PVC will be `<pod name>-<volume name>` where
+                        `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description:
+ Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description:
+ Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description:
+ selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description:
+ volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description:
+ fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: "lun is Optional: FC target lun number"
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description:
+ "targetWWNs is Optional: FC target
+ worldwide names (WWNs)"
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description:
+ driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description:
+ "options is Optional: this field holds
+ extra command options if any."
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description:
+ flocker represents a Flocker volume attached
+ to a kubelet's host machine. This depends on the Flocker
+ control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description:
+ datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description:
+ revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description:
+ chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description:
+ chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ : will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description:
+ secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description:
+ photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description:
+ pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description:
+ portworxVolume represents a portworx volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description:
+ volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description:
+ projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description:
+ Projection that may be projected
+ along with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description:
+ Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description:
+ configMap information about the
+ configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether
+ the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description:
+ downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description:
+ Items is a list of DownwardAPIVolume
+ file
+ items:
+ description:
+ DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and uid
+ are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of the
+ schema the FieldPath is written
+ in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the field
+ to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path is the
+ relative path name of the file
+ to be created. Must not be absolute
+ or contain the '..' path. Must
+ be utf-8 encoded. The first item
+ of the relative path must not
+ start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container name:
+ required for volumes, optional
+ for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description:
+ "Required: resource
+ to select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description:
+ secret information about the
+ secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description:
+ serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description:
+ quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description:
+ volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description:
+ scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description:
+ gateway is the host address of the
+ ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description:
+ protectionDomain is the name of the
+ ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description:
+ sslEnabled Flag enable/disable SSL
+ communication with Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description:
+ storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description:
+ system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description:
+ optional field specify whether the
+ Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description:
+ storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description:
+ vsphereVolume represents a vSphere volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description:
+ storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description:
+ storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description:
+ volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ deployCsiAddons:
+ description: |-
+ TODO: do we want Csi addon specific field? or should we generalize to
+ a list of additional sidecars?
+ type: boolean
+ enableMetadata:
+ description: |-
+ Set to true to enable adding volume metadata on the CephFS subvolumes and RBD images.
+ Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+ Hence enable metadata is false by default.
+ type: boolean
+ encryption:
+ description: Driver's encryption settings
+ properties:
+ configMapName:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: "'.name' cannot be empty"
+ rule: self.name != ""
+ type: object
+ fsGroupPolicy:
+ description: |-
+ Policy for modifying a volume's ownership or permissions when the PVC is being mounted.
+ supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ type: string
+ fuseMountOptions:
+ additionalProperties:
+ type: string
+ description: Set mount options to use when using the Fuse client
+ type: object
+ generateOMapInfo:
+ description: |-
+ OMAP generator will generate the omap mapping between the PV name and the RBD image.
+ Need to be enabled when we are using rbd mirroring feature.
+ By default OMAP generator sidecar is deployed with Csi controller plugin pod, to disable
+ it set it to false.
+ type: boolean
+ grpcTimeout:
+ description:
+ Set the gRPC timeout for gRPC calls issued by the driver
+ components
+ minimum: 0
+ type: integer
+ imageSet:
+ description: |-
+ A reference to a ConfigMap resource holding image overwrite for deployed
+ containers
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: "'.name' cannot be empty"
+ rule: self.name != ""
+ kernelMountOptions:
+ additionalProperties:
+ type: string
+ description: |-
+ Set mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options
+ Set to "ms_mode=secure" when connections.encrypted is enabled in Ceph
+ type: object
+ leaderElection:
+ description: Leader election setting
+ properties:
+ leaseDuration:
+ description: |-
+ Duration in seconds that non-leader candidates will wait to force acquire leadership.
+ Default to 137 seconds.
+ minimum: 0
+ type: integer
+ renewDeadline:
+ description: |-
+ Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
+ Defaults to 107 seconds.
+ minimum: 0
+ type: integer
+ retryPeriod:
+ description: |-
+ Retry Period in seconds the LeaderElector clients should wait between tries of actions.
+ Defaults to 26 seconds.
+ minimum: 0
+ type: integer
+ type: object
+ liveness:
+ description: |-
+ Liveness metrics configuration.
+ disabled by default.
+ properties:
+ metricsPort:
+ description: Port to expose liveness metrics
+ maximum: 65535
+ minimum: 1024
+ type: integer
+ type: object
+ log:
+ description: Logging configuration for driver's pods
+ properties:
+ rotation:
+ description: log rotation for csi pods
+ properties:
+ logHostPath:
+ description: |-
+ LogHostPath is the prefix directory path for the csi log files
+ Default to /var/lib/cephcsi
+ type: string
+ maxFiles:
+ description: |-
+ MaxFiles is the number of logrotate files
+ Default to 7
+ type: integer
+ maxLogSize:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ MaxLogSize is the maximum size of the log file
+ per csi pods
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ periodicity:
+ description: Periodicity is the periodicity of the log rotation.
+ enum:
+ - hourly
+ - daily
+ - weekly
+ - monthly
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: Either maxLogSize or periodicity must be set
+ rule: (has(self.maxLogSize)) || (has(self.periodicity))
+ verbosity:
+ description: |-
+ Log verbosity level for driver pods,
+ Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+ Default to 0
+ maximum: 5
+ minimum: 0
+ type: integer
+ type: object
+ nodePlugin:
+ description: Driver's plugin configuration
+ properties:
+ EnableSeLinuxHostMount:
+ description:
+ Control the host mount of /etc/selinux for csi plugin
+ pods. Defaults to false
+ type: boolean
+ affinity:
+ description: Pod's affinity settings
+ properties:
+ nodeAffinity:
+ description:
+ Describes node affinity scheduling rules for
+ the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description:
+ A node selector term, associated with
+ the corresponding weight.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description:
+ Weight associated with matching the
+ corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description:
+ Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description:
+ Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description:
+ Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone, etc.
+ as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ description: Pod's annotations
+ type: object
+ imagePullPolicy:
+ description:
+ To indicate the image pull policy to be applied to
+ all the containers in the csi driver pods.
+ type: string
+ kubeletDirPath:
+ description:
+ kubelet directory path, if kubelet configured to
+ use other than /var/lib/kubelet path.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Pod's labels
+ type: object
+ priorityClassName:
+ description: Pod's user defined priority class name
+ type: string
+ resources:
+ description: Resource requirements for plugin's containers
+ properties:
+ addons:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ liveness:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ logRotator:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ plugin:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ registrar:
+ description:
+ ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ type: object
+ serviceAccountName:
+ description: Service account name to be used for driver's pods
+ type: string
+ tolerations:
+ description: Pod's tolerations list
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topology:
+ description: Topology settings for the plugin pods
+ properties:
+ domainLabels:
+ description:
+ Domain labels define which node labels to use
+ as domains for CSI nodeplugins to advertise their domains
+ items:
+ type: string
+ type: array
+ type: object
+ updateStrategy:
+ description: |-
+ Driver's plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+ Default value is RollingUpdate with MaxUnavailable set to 1
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if type = "RollingUpdate".
+ ---
+ TODO: Update this to follow our convention for oneOf, whatever we decide it
+ to be. Same as Deployment `strategy.rollingUpdate`.
+ See https://github.com/kubernetes/kubernetes/issues/35345
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of nodes with an existing available DaemonSet pod that
+ can have an updated DaemonSet pod during an update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ Default value is 0.
+ Example: when this is set to 30%, at most 30% of the total number of nodes
+ that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ can have a new pod created before the old pod is marked as deleted.
+ The update starts by launching new pods on 30% of nodes. Once an updated
+ pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ on that node is marked deleted. If the old pod becomes unavailable for any
+ reason (Ready transitions to false, is evicted, or is drained) an updated
+ pod is immediately created on that node without considering surge limits.
+ Allowing surge implies the possibility that the resources consumed by the
+ daemonset on any given node can double if the readiness check fails, and
+ so resource intensive daemonsets should take into account that they may
+ cause evictions during disruption.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of DaemonSet pods that can be unavailable during the
+ update. Value can be an absolute number (ex: 5) or a percentage of total
+ number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ number is calculated from percentage by rounding up.
+ This cannot be 0 if MaxSurge is 0
+ Default value is 1.
+ Example: when this is set to 30%, at most 30% of the total number of nodes
+ that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ can have their pods stopped for an update at any given time. The update
+ starts by stopping at most 30% of those DaemonSet pods and then brings
+ up new DaemonSet pods in their place. Once the new pods are available,
+ it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ 70% of original number of DaemonSet pods are available at all times during
+ the update.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description:
+ Type of daemon set update. Can be "RollingUpdate"
+ or "OnDelete". Default is RollingUpdate.
+ type: string
+ type: object
+ volumes:
+ description:
+ Volume and volume mount definitions to attach to
+ the pod
+ items:
+ properties:
+ mount:
+ description:
+ VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ volume:
+ description:
+ Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description:
+ azureDisk represents an Azure Data Disk
+ mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description:
+ "cachingMode is the Host Caching mode:
+ None, Read Only, Read Write."
+ type: string
+ diskName:
+ description:
+ diskName is the Name of the data disk
+ in the blob storage
+ type: string
+ diskURI:
+ description:
+ diskURI is the URI of data disk in
+ the blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description:
+ "kind expected values are Shared: multiple
+ blob disks per storage account Dedicated: single
+ blob disk per storage account Managed: azure
+ managed data disk (only in managed availability
+ set). defaults to shared"
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description:
+ azureFile represents an Azure File Service
+ mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description:
+ secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description:
+ cephFS represents a Ceph FS mount on the
+ host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description:
+ "path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default
+ is /"
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description:
+ configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description:
+ csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description:
+ downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description:
+ Items is a list of downward API volume
+ file
+ items:
+ description:
+ DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the '..' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container name: required
+ for volumes, optional for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: "Required: resource to select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `-` where
+ `` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description:
+ Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description:
+ Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description:
+ selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description:
+ volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description:
+ fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: "lun is Optional: FC target lun number"
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description:
+ "targetWWNs is Optional: FC target
+ worldwide names (WWNs)"
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description:
+ driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description:
+ "options is Optional: this field holds
+ extra command options if any."
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description:
+ flocker represents a Flocker volume attached
+ to a kubelet's host machine. This depends on the Flocker
+ control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description:
+ datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description:
+ revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description:
+ chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description:
+ chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ : will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description:
+ secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description:
+ photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description:
+ pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description:
+ portworxVolume represents a portworx volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description:
+ volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description:
+ projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description:
+ Projection that may be projected
+ along with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description:
+ Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description:
+ configMap information about the
+ configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether
+ the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description:
+ downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description:
+ Items is a list of DownwardAPIVolume
+ file
+ items:
+ description:
+ DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and uid
+ are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of the
+ schema the FieldPath is written
+ in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the field
+ to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path is the
+ relative path name of the file
+ to be created. Must not be absolute
+ or contain the '..' path. Must
+ be utf-8 encoded. The first item
+ of the relative path must not
+ start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container name:
+ required for volumes, optional
+ for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description:
+ "Required: resource
+ to select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description:
+ secret information about the
+ secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description:
+ serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+                              its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description:
+ quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+                  Defaults to serviceaccount user
+ type: string
+ volume:
+ description:
+ volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description:
+ scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description:
+ gateway is the host address of the
+ ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description:
+ protectionDomain is the name of the
+ ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description:
+ sslEnabled Flag enable/disable SSL
+ communication with Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description:
+ storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description:
+ system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description:
+ optional field specify whether the
+ Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description:
+ storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description:
+ vsphereVolume represents a vSphere volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description:
+ storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description:
+ storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description:
+ volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ snapshotPolicy:
+            description:
+              "Select a policy for snapshot behavior: none, volumeSnapshot,
+              volumeGroupSnapshot"
+ enum:
+ - none
+ - volumeGroupSnapshot
+ - volumeSnapshot
+ type: string
+ type: object
+ status:
+ description: DriverStatus defines the observed state of Driver
+ type: object
+ type: object
+ x-kubernetes-validations:
+ - message: ".metadata.name must match: '[.](rbd|cephfs|nfs).csi.ceph.com'"
+ rule: self.metadata.name.matches('^(.+\\.)?(rbd|cephfs|nfs)?\\.csi\\.ceph\\.com$')
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: operatorconfigs.csi.ceph.io
+spec:
+ group: csi.ceph.io
+ names:
+ kind: OperatorConfig
+ listKind: OperatorConfigList
+ plural: operatorconfigs
+ singular: operatorconfig
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: OperatorConfig is the Schema for the operatorconfigs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OperatorConfigSpec defines the desired state of OperatorConfig
+ properties:
+ driverSpecDefaults:
+ description:
+ Allow overwrite of hardcoded defaults for any driver
+ managed by this operator
+ properties:
+ attachRequired:
+ description: |-
+ Whether to skip any attach operation altogether for CephCsi PVCs.
+ See more details [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ If set to false it skips the volume attachments and makes the creation of pods using the CephCsi PVC fast.
+ **WARNING** It's highly discouraged to use this for RWO volumes. for RBD PVC it can cause data corruption,
+ csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set to false
+ since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+ Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ type: boolean
+ cephFsClientType:
+ description: |-
+                  Select between cephfs kernel driver and ceph-fuse
+ If you select a non-kernel client, your application may be disrupted during upgrade.
+ See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
+ NOTE! cephfs quota is not supported in kernel version < 4.17
+ enum:
+ - autodetect
+ - kernel
+ type: string
+ clusterName:
+ description: |-
+ Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases
+ when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster.
+ type: string
+ controllerPlugin:
+ description: Driver's controller plugin configuration
+ properties:
+ affinity:
+ description: Pod's affinity settings
+ properties:
+ nodeAffinity:
+ description:
+ Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description:
+ A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description:
+ Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description:
+ Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description:
+ Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description:
+ Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ description: Pod's annotations
+ type: object
+ imagePullPolicy:
+ description:
+ To indicate the image pull policy to be applied
+ to all the containers in the csi driver pods.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Pod's labels
+ type: object
+ priorityClassName:
+ description: Pod's user defined priority class name
+ type: string
+ privileged:
+ description: |-
+ To enable logrotation for csi pods,
+ Some platforms require controller plugin to run privileged,
+ For example, OpenShift with SELinux restrictions requires the pod to be privileged to write to hostPath.
+ type: boolean
+ replicas:
+ description:
+ Set replicas for controller plugin's deployment.
+ Defaults to 2
+ format: int32
+ minimum: 1
+ type: integer
+ resources:
+ description:
+ Resource requirements for controller plugin's
+ containers
+ properties:
+ addons:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ attacher:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ liveness:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ logRotator:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ omapGenerator:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ plugin:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ provisioner:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ resizer:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ snapshotter:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ type: object
+ serviceAccountName:
+ description:
+ Service account name to be used for driver's
+ pods
+ type: string
+ tolerations:
+ description: Pod's tolerations list
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumes:
+ description:
+ Volume and volume mount definitions to attach
+ to the pod
+ items:
+ properties:
+ mount:
+ description:
+ VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ volume:
+ description:
+ Volume represents a named volume in a pod
+ that may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description:
+ azureDisk represents an Azure Data
+ Disk mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description:
+ "cachingMode is the Host Caching
+ mode: None, Read Only, Read Write."
+ type: string
+ diskName:
+ description:
+ diskName is the Name of the data
+ disk in the blob storage
+ type: string
+ diskURI:
+ description:
+ diskURI is the URI of data disk
+ in the blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description:
+ "kind expected values are Shared:
+ multiple blob disks per storage account Dedicated:
+ single blob disk per storage account Managed:
+ azure managed data disk (only in managed availability
+ set). defaults to shared"
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description:
+ azureFile represents an Azure File
+ Service mount on the host and bind mount to the
+ pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description:
+ secretName is the name of secret
+ that contains Azure Storage Account Name and
+ Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description:
+ cephFS represents a Ceph FS mount on
+ the host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description:
+ "path is Optional: Used as the
+ mounted root, rather than the full Ceph tree,
+ default is /"
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description:
+ configMap represents a configMap that
+ should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description:
+ csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description:
+ downwardAPI represents downward API
+ about the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits to use on created files by default. Must be a
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description:
+ Items is a list of downward API
+ volume file
+ items:
+ description:
+ DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects a field
+ of the pod: only annotations, labels,
+ name, namespace and uid are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path is the relative
+ path name of the file to be created.
+ Must not be absolute or contain the
+ '..' path. Must be utf-8 encoded.
+ The first item of the relative path
+ must not start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container name: required
+ for volumes, optional for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description:
+ "Required: resource to
+ select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of
+ resource being referenced
+ type: string
+ name:
+ description:
+ Name is the name of
+ resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of
+ resource being referenced
+ type: string
+ name:
+ description:
+ Name is the name of
+ resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description:
+ selector is a label query
+ over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is
+ a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description:
+ volumeName is the binding
+ reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description:
+ fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and
+ then exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description:
+ "lun is Optional: FC target lun
+ number"
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description:
+ "targetWWNs is Optional: FC target
+ worldwide names (WWNs)"
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description:
+ driver is the name of the driver
+ to use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description:
+ "options is Optional: this field
+ holds extra command options if any."
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description:
+ flocker represents a Flocker volume
+ attached to a kubelet's host machine. This depends
+ on the Flocker control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description:
+ datasetUUID is the UUID of the
+ dataset. This is unique identifier of a Flocker
+ dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description:
+ revision is the commit hash for
+ the specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description:
+ chapAuthDiscovery defines whether
+ support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description:
+ chapAuthSession defines whether
+ support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description:
+ iqn is the target iSCSI Qualified
+ Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description:
+ lun represents iSCSI Target Lun
+ number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description:
+ secretRef is the CHAP Secret for
+ iSCSI target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description:
+ photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets
+ host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description:
+ pdID is the ID that identifies
+ Photon Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description:
+ portworxVolume represents a portworx
+ volume attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description:
+ volumeID uniquely identifies a
+ Portworx volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description:
+ projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description:
+ Projection that may be projected
+ along with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description:
+ Relative path from the
+ volume root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description:
+ configMap information about
+ the configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to
+ a path within a volume.
+ properties:
+ key:
+ description:
+ key is the key
+ to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether
+ the ConfigMap or its keys must be
+ defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description:
+ downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description:
+ Items is a list of DownwardAPIVolume
+ file
+ items:
+ description:
+ DownwardAPIVolumeFile
+ represents information to create
+ the file containing the pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and
+ uid are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of
+ the schema the FieldPath
+ is written in terms of,
+ defaults to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the
+ field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path
+ is the relative path name
+ of the file to be created.
+ Must not be absolute or contain
+ the '..' path. Must be utf-8
+ encoded. The first item of
+ the relative path must not
+ start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container
+ name: required for volumes,
+ optional for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the
+ output format of the exposed
+ resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description:
+ "Required:
+ resource to select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description:
+ secret information about
+ the secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to
+ a path within a volume.
+ properties:
+ key:
+ description:
+ key is the key
+ to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional field specify
+ whether the Secret or its key must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description:
+ serviceAccountToken is information
+ about the serviceAccountToken data to
+ project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description:
+ quobyte represents a Quobyte mount
+ on the host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description:
+ volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description:
+ scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description:
+ gateway is the host address of
+ the ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description:
+ protectionDomain is the name of
+ the ScaleIO Protection Domain for the configured
+ storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description:
+ sslEnabled Flag enable/disable
+ SSL communication with Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description:
+ storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description:
+ system is the name of the storage
+ system as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description:
+ optional field specify whether
+ the Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description:
+ storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description:
+ vsphereVolume represents a vSphere
+ volume attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description:
+ storagePolicyID is the storage
+ Policy Based Management (SPBM) profile ID
+ associated with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description:
+ storagePolicyName is the storage
+ Policy Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description:
+ volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ deployCsiAddons:
+ description: |-
+ TODO: do we want Csi addon specific field? or should we generalize to
+ a list of additional sidecars?
+ type: boolean
+ enableMetadata:
+ description: |-
+ Set to true to enable adding volume metadata on the CephFS subvolumes and RBD images.
+ Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+ Hence enable metadata is false by default.
+ type: boolean
+ encryption:
+ description: Driver's encryption settings
+ properties:
+ configMapName:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: "'.name' cannot be empty"
+ rule: self.name != ""
+ type: object
+ fsGroupPolicy:
+ description: |-
+ Policy for modifying a volume's ownership or permissions when the PVC is being mounted.
+ supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ type: string
+ fuseMountOptions:
+ additionalProperties:
+ type: string
+ description: Set mount options to use when using the Fuse client
+ type: object
+ generateOMapInfo:
+ description: |-
+ OMAP generator will generate the omap mapping between the PV name and the RBD image.
+ Need to be enabled when we are using rbd mirroring feature.
+ By default OMAP generator sidecar is deployed with Csi controller plugin pod, to disable
+ it set it to false.
+ type: boolean
+ grpcTimeout:
+ description:
+ Set the gRPC timeout for gRPC call issued by the
+ driver components
+ minimum: 0
+ type: integer
+ imageSet:
+ description: |-
+ A reference to a ConfigMap resource holding image overwrite for deployed
+ containers
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: "'.name' cannot be empty"
+ rule: self.name != ""
+ kernelMountOptions:
+ additionalProperties:
+ type: string
+ description: |-
+ Set mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options
+ Set to "ms_mode=secure" when connections.encrypted is enabled in Ceph
+ type: object
+ leaderElection:
+ description: Leader election setting
+ properties:
+ leaseDuration:
+ description: |-
+ Duration in seconds that non-leader candidates will wait to force acquire leadership.
+ Default to 137 seconds.
+ minimum: 0
+ type: integer
+ renewDeadline:
+ description: |-
+ Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
+ Defaults to 107 seconds.
+ minimum: 0
+ type: integer
+ retryPeriod:
+ description: |-
+ Retry Period in seconds the LeaderElector clients should wait between tries of actions.
+ Defaults to 26 seconds.
+ minimum: 0
+ type: integer
+ type: object
+ liveness:
+ description: |-
+ Liveness metrics configuration.
+ disabled by default.
+ properties:
+ metricsPort:
+ description: Port to expose liveness metrics
+ maximum: 65535
+ minimum: 1024
+ type: integer
+ type: object
+ log:
+ description: Logging configuration for driver's pods
+ properties:
+ rotation:
+ description: log rotation for csi pods
+ properties:
+ logHostPath:
+ description: |-
+ LogHostPath is the prefix directory path for the csi log files
+ Default to /var/lib/cephcsi
+ type: string
+ maxFiles:
+ description: |-
+ MaxFiles is the number of logrotate files
+ Default to 7
+ type: integer
+ maxLogSize:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ MaxLogSize is the maximum size of the log
+ file per csi pods
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ periodicity:
+ description:
+ Periodicity is the periodicity of the log
+ rotation.
+ enum:
+ - hourly
+ - daily
+ - weekly
+ - monthly
+ type: string
+ type: object
+ x-kubernetes-validations:
+ - message: Either maxLogSize or periodicity must be set
+ rule: (has(self.maxLogSize)) || (has(self.periodicity))
+ verbosity:
+ description: |-
+ Log verbosity level for driver pods,
+ Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+ Default to 0
+ maximum: 5
+ minimum: 0
+ type: integer
+ type: object
+ nodePlugin:
+ description: Driver's plugin configuration
+ properties:
+ EnableSeLinuxHostMount:
+ description:
+ Control the host mount of /etc/selinux for csi
+ plugin pods. Defaults to false
+ type: boolean
+ affinity:
+ description: Pod's affinity settings
+ properties:
+ nodeAffinity:
+ description:
+ Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description:
+ A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description:
+ Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description:
+ Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description:
+ A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description:
+ A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description:
+ The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description:
+ Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ podAntiAffinity:
+ description:
+ Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description:
+ The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description:
+ Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: object
+ annotations:
+ additionalProperties:
+ type: string
+ description: Pod's annotations
+ type: object
+ imagePullPolicy:
+ description:
+ To indicate the image pull policy to be applied
+ to all the containers in the csi driver pods.
+ type: string
+ kubeletDirPath:
+ description:
+ kubelet directory path, if kubelet configured
+ to use other than /var/lib/kubelet path.
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Pod's labels
+ type: object
+ priorityClassName:
+ description: Pod's user defined priority class name
+ type: string
+ resources:
+ description: Resource requirements for plugin's containers
+ properties:
+ addons:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ liveness:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ logRotator:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ plugin:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ registrar:
+ description:
+ ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description:
+ ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ type: object
+ serviceAccountName:
+ description:
+ Service account name to be used for driver's
+ pods
+ type: string
+ tolerations:
+ description: Pod's tolerations list
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple (key, value, effect) using the matching operator.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topology:
+ description: Topology settings for the plugin pods
+ properties:
+ domainLabels:
+ description:
+ Domain labels define which node labels to
+ use as domains for CSI nodeplugins to advertise their
+ domains
+ items:
+ type: string
+ type: array
+ type: object
+ updateStrategy:
+ description: |-
+ Driver's plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+                              Default value is RollingUpdate with MaxUnavailable set to 1
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if type = "RollingUpdate".
+ ---
+ TODO: Update this to follow our convention for oneOf, whatever we decide it
+ to be. Same as Deployment `strategy.rollingUpdate`.
+ See https://github.com/kubernetes/kubernetes/issues/35345
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of nodes with an existing available DaemonSet pod that
+                                can have an updated DaemonSet pod during an update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ Default value is 0.
+ Example: when this is set to 30%, at most 30% of the total number of nodes
+ that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+                                can have a new pod created before the old pod is marked as deleted.
+ The update starts by launching new pods on 30% of nodes. Once an updated
+ pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ on that node is marked deleted. If the old pod becomes unavailable for any
+ reason (Ready transitions to false, is evicted, or is drained) an updated
+                                pod is immediately created on that node without considering surge limits.
+ Allowing surge implies the possibility that the resources consumed by the
+ daemonset on any given node can double if the readiness check fails, and
+ so resource intensive daemonsets should take into account that they may
+ cause evictions during disruption.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of DaemonSet pods that can be unavailable during the
+ update. Value can be an absolute number (ex: 5) or a percentage of total
+ number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ number is calculated from percentage by rounding up.
+ This cannot be 0 if MaxSurge is 0
+ Default value is 1.
+ Example: when this is set to 30%, at most 30% of the total number of nodes
+ that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ can have their pods stopped for an update at any given time. The update
+ starts by stopping at most 30% of those DaemonSet pods and then brings
+ up new DaemonSet pods in their place. Once the new pods are available,
+ it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ 70% of original number of DaemonSet pods are available at all times during
+ the update.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description:
+ Type of daemon set update. Can be "RollingUpdate"
+ or "OnDelete". Default is RollingUpdate.
+ type: string
+ type: object
+ volumes:
+ description:
+ Volume and volume mount definitions to attach
+ to the pod
+ items:
+ properties:
+ mount:
+ description:
+ VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ recursiveReadOnly:
+ description: |-
+ RecursiveReadOnly specifies whether read-only mounts should be handled
+ recursively.
+
+
+ If ReadOnly is false, this field has no meaning and must be unspecified.
+
+
+ If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ recursively read-only. If this field is set to IfPossible, the mount is made
+ recursively read-only, if it is supported by the container runtime. If this
+ field is set to Enabled, the mount is made recursively read-only if it is
+ supported by the container runtime, otherwise the pod will not be started and
+ an error will be generated to indicate the reason.
+
+
+ If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ None (or be unspecified, which defaults to None).
+
+
+ If this field is not specified, it is treated as an equivalent of Disabled.
+ type: string
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ volume:
+ description:
+ Volume represents a named volume in a pod
+ that may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description:
+ azureDisk represents an Azure Data
+ Disk mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description:
+ "cachingMode is the Host Caching
+ mode: None, Read Only, Read Write."
+ type: string
+ diskName:
+ description:
+ diskName is the Name of the data
+ disk in the blob storage
+ type: string
+ diskURI:
+ description:
+ diskURI is the URI of data disk
+ in the blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description:
+ "kind expected values are Shared:
+ multiple blob disks per storage account Dedicated:
+ single blob disk per storage account Managed:
+ azure managed data disk (only in managed availability
+ set). defaults to shared"
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description:
+ azureFile represents an Azure File
+ Service mount on the host and bind mount to the
+ pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description:
+ secretName is the name of secret
+ that contains Azure Storage Account Name and
+ Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description:
+ cephFS represents a Ceph FS mount on
+ the host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ path:
+ description:
+ "path is Optional: Used as the
+ mounted root, rather than the full Ceph tree,
+ default is /"
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description:
+ configMap represents a configMap that
+ should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description:
+ csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description:
+ downwardAPI represents downward API
+ about the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+                                  Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description:
+ Items is a list of downward API
+ volume file
+ items:
+ description:
+ DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects a field
+ of the pod: only annotations, labels,
+ name, namespace and uid are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of the schema
+ the FieldPath is written in terms
+ of, defaults to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the field to
+ select in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path is the relative
+ path name of the file to be created.
+ Must not be absolute or contain the
+ '..' path. Must be utf-8 encoded.
+ The first item of the relative path
+ must not start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container name: required
+ for volumes, optional for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description:
+ "Required: resource to
+ select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+                                pod. The name of the PVC will be `<pod name>-<volume name>` where
+                                `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of
+ resource being referenced
+ type: string
+ name:
+ description:
+ Name is the name of
+ resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description:
+ Kind is the type of
+ resource being referenced
+ type: string
+ name:
+ description:
+ Name is the name of
+ resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description:
+ selector is a label query
+ over volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions is
+ a list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description:
+ volumeName is the binding
+ reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description:
+ fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and
+ then exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description:
+ "lun is Optional: FC target lun
+ number"
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description:
+ "targetWWNs is Optional: FC target
+ worldwide names (WWNs)"
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description:
+ driver is the name of the driver
+ to use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description:
+ "options is Optional: this field
+ holds extra command options if any."
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description:
+ flocker represents a Flocker volume
+ attached to a kubelet's host machine. This depends
+ on the Flocker control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description:
+ datasetUUID is the UUID of the
+ dataset. This is unique identifier of a Flocker
+ dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description:
+ revision is the commit hash for
+ the specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description:
+ chapAuthDiscovery defines whether
+ support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description:
+ chapAuthSession defines whether
+ support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ : will be created for the connection.
+ type: string
+ iqn:
+ description:
+ iqn is the target iSCSI Qualified
+ Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description:
+ lun represents iSCSI Target Lun
+ number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description:
+ secretRef is the CHAP Secret for
+ iSCSI target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description:
+ photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets
+ host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description:
+ pdID is the ID that identifies
+ Photon Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description:
+ portworxVolume represents a portworx
+ volume attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description:
+ volumeID uniquely identifies a
+ Portworx volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description:
+ projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description:
+ Projection that may be projected
+ along with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description:
+ matchExpressions
+ is a list of label selector
+ requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description:
+ key is the
+ label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description:
+ Relative path from the
+ volume root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description:
+ configMap information about
+ the configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to
+ a path within a volume.
+ properties:
+ key:
+ description:
+ key is the key
+ to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional specify whether
+ the ConfigMap or its keys must be
+ defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description:
+ downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description:
+ Items is a list of DownwardAPIVolume
+ file
+ items:
+ description:
+ DownwardAPIVolumeFile
+ represents information to create
+ the file containing the pod field
+ properties:
+ fieldRef:
+ description:
+ "Required: Selects
+ a field of the pod: only annotations,
+ labels, name, namespace and
+ uid are supported."
+ properties:
+ apiVersion:
+ description:
+ Version of
+ the schema the FieldPath
+ is written in terms of,
+ defaults to "v1".
+ type: string
+ fieldPath:
+ description:
+ Path of the
+ field to select in the
+ specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description:
+ "Required: Path
+ is the relative path name
+ of the file to be created.
+ Must not be absolute or contain
+ the '..' path. Must be utf-8
+ encoded. The first item of
+ the relative path must not
+ start with '..'"
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description:
+ "Container
+ name: required for volumes,
+ optional for env vars"
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description:
+ Specifies the
+ output format of the exposed
+ resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description:
+ "Required:
+ resource to select"
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ secret:
+ description:
+ secret information about
+ the secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to
+ a path within a volume.
+ properties:
+ key:
+ description:
+ key is the key
+ to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ optional:
+ description:
+ optional field specify
+ whether the Secret or its key must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description:
+ serviceAccountToken is information
+ about the serviceAccountToken data to
+ project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours.Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description:
+ quobyte represents a Quobyte mount
+ on the host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serivceaccount user
+ type: string
+ volume:
+ description:
+ volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description:
+ scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description:
+ gateway is the host address of
+ the ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description:
+ protectionDomain is the name of
+ the ScaleIO Protection Domain for the configured
+ storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description:
+ sslEnabled Flag enable/disable
+ SSL communication with Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description:
+ storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description:
+ system is the name of the storage
+ system as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description:
+ Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description:
+ optional field specify whether
+ the Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description:
+ storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description:
+ vsphereVolume represents a vSphere
+ volume attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description:
+ storagePolicyID is the storage
+ Policy Based Management (SPBM) profile ID
+ associated with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description:
+ storagePolicyName is the storage
+ Policy Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description:
+ volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: object
+ type: array
+ type: object
+ snapshotPolicy:
+ description:
+ "Select a policy for snapshot behavior: none, volumeGroupSnapshot,
+ volumeSnapshot"
+ enum:
+ - none
+ - volumeGroupSnapshot
+ - volumeSnapshot
+ type: string
+ type: object
+ log:
+ description:
+ OperatorLogSpec provide log related settings for the
+ operator
+ properties:
+ verbosity:
+ description: Operator's log level
+ maximum: 3
+ minimum: 0
+ type: integer
+ type: object
+ type: object
+ status:
+ description: OperatorConfigStatus defines the observed state of OperatorConfig
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ceph-csi-cephfs-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ceph-csi-cephfs-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-controller-manager
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ceph-csi-nfs-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ceph-csi-nfs-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ceph-csi-rbd-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ceph-csi-rbd-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: ceph-csi-cephfs-ctrlplugin-r
+ namespace: rook-ceph
+rules:
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - csiaddons.openshift.io
+ resources:
+ - csiaddonsnodes
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-leader-election-role
+ namespace: rook-ceph
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: ceph-csi-rbd-ctrlplugin-r
+ namespace: rook-ceph
+rules:
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - watch
+ - list
+ - delete
+ - update
+ - create
+ - apiGroups:
+ - csiaddons.openshift.io
+ resources:
+ - csiaddonsnodes
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: ceph-csi-rbd-nodeplugin-r
+ namespace: rook-ceph
+rules:
+ - apiGroups:
+ - csiaddons.openshift.io
+ resources:
+ - csiaddonsnodes
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-cephconnection-viewer-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - cephconnections
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - cephconnections/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-cephconnections-editor-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - cephconnections
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - cephconnections/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-cephfs-ctrlplugin-cr
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-cephfs-nodeplugin-cr
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-clientprofile-viewer-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-clientprofilemapping-editor-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-clientprofilemapping-viewer-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-clientprofiles-editor-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-driver-editor-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-driver-viewer-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-manager-role
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - cephconnections
+ verbs:
+ - delete
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofilemappings/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - clientprofiles/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - drivers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - operatorconfigs
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csidrivers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-metrics-reader
+rules:
+ - nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-nfs-ctrlplugin-cr
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-nfs-nodeplugin-cr
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-operatorconfig-editor-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - operatorconfigs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - operatorconfigs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-operatorconfig-viewer-role
+rules:
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - operatorconfigs
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - csi.ceph.io
+ resources:
+ - operatorconfigs/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-proxy-role
+rules:
+ - apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+ - apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-rbd-ctrlplugin-cr
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments/status
+ verbs:
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims/status
+ verbs:
+ - patch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotcontents/status
+ verbs:
+ - update
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ceph-csi-rbd-nodeplugin-cr
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - volumeattachments
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - serviceaccounts/token
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: ceph-csi-cephfs-ctrlplugin-rb
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ceph-csi-cephfs-ctrlplugin-r
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-cephfs-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-leader-election-rolebinding
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ceph-csi-leader-election-role
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-controller-manager
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: ceph-csi-rbd-ctrlplugin-rb
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ceph-csi-rbd-ctrlplugin-r
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-rbd-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: ceph-csi-rbd-nodeplugin-rb
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: ceph-csi-rbd-nodeplugin-r
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-rbd-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ceph-csi-cephfs-ctrlplugin-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-cephfs-ctrlplugin-cr
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-cephfs-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ceph-csi-cephfs-nodeplugin-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-cephfs-nodeplugin-cr
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-cephfs-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-manager-role
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-controller-manager
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ceph-csi-nfs-ctrlplugin-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-nfs-ctrlplugin-cr
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-nfs-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ceph-csi-nfs-nodeplugin-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-nfs-nodeplugin-cr
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-nfs-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ name: ceph-csi-proxy-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-proxy-role
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-controller-manager
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ceph-csi-rbd-ctrlplugin-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-rbd-ctrlplugin-cr
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-rbd-ctrlplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ceph-csi-rbd-nodeplugin-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ceph-csi-rbd-nodeplugin-cr
+subjects:
+ - kind: ServiceAccount
+ name: ceph-csi-rbd-nodeplugin-sa
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ control-plane: controller-manager
+ name: ceph-csi-controller-manager-metrics-service
+ namespace: rook-ceph
+spec:
+ ports:
+ - name: https
+ port: 8443
+ protocol: TCP
+ targetPort: https
+ selector:
+ control-plane: controller-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: ceph-csi-operator
+ control-plane: controller-manager
+ name: ceph-csi-controller-manager
+ namespace: rook-ceph
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ control-plane: ceph-csi-op-controller-manager
+ template:
+ metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
+ labels:
+ control-plane: ceph-csi-op-controller-manager
+ spec:
+ containers:
+ - args:
+ - --secure-listen-address=0.0.0.0:8443
+ - --upstream=http://127.0.0.1:8080/
+ - --v=0
+ image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0
+ name: kube-rbac-proxy
+ ports:
+ - containerPort: 8443
+ name: https
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 5m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ - args:
+ - --health-probe-bind-address=:8081
+ - --metrics-bind-address=127.0.0.1:8080
+ - --leader-elect
+ command:
+ - /manager
+ env:
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CSI_SERVICE_ACCOUNT_PREFIX
+ value: ceph-csi-
+ image: quay.io/cephcsi/ceph-csi-operator:v0.1.0
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ name: manager
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources:
+ limits:
+ cpu: 500m
+ memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 64Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: ceph-csi-controller-manager
+ terminationGracePeriodSeconds: 10
diff --git a/deploy/examples/csi/rbd/raw-block-pod.yaml b/deploy/examples/csi/rbd/raw-block-pod.yaml
new file mode 100644
index 000000000000..f5b5a2142c9b
--- /dev/null
+++ b/deploy/examples/csi/rbd/raw-block-pod.yaml
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: csirbd-block-demo-pod
+spec:
+ containers:
+ - name: centos
+ image: quay.io/centos/centos:latest
+ command: ["/bin/sleep", "infinity"]
+ volumeDevices:
+ - name: mypvc
+ devicePath: /dev/xvda
+ volumes:
+ - name: mypvc
+ persistentVolumeClaim:
+ claimName: raw-block-rbd-pvc
diff --git a/deploy/examples/csi/rbd/raw-block-pvc.yaml b/deploy/examples/csi/rbd/raw-block-pvc.yaml
new file mode 100644
index 000000000000..4f38ecff2716
--- /dev/null
+++ b/deploy/examples/csi/rbd/raw-block-pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: raw-block-rbd-pvc
+spec:
+ accessModes:
+ - ReadWriteOnce
+ volumeMode: Block
+ resources:
+ requests:
+ storage: 1Gi
+ storageClassName: rook-ceph-block
diff --git a/deploy/examples/direct-mount.yaml b/deploy/examples/direct-mount.yaml
index 2788c7fc6d81..5ab8844009b5 100644
--- a/deploy/examples/direct-mount.yaml
+++ b/deploy/examples/direct-mount.yaml
@@ -19,7 +19,7 @@ spec:
serviceAccountName: rook-ceph-default
containers:
- name: rook-direct-mount
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
command: ["/bin/bash"]
args: ["-m", "-c", "/usr/local/bin/toolbox.sh"]
imagePullPolicy: IfNotPresent
diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt
index fe1d60c72e80..b4f1817738ce 100644
--- a/deploy/examples/images.txt
+++ b/deploy/examples/images.txt
@@ -1,11 +1,11 @@
+ docker.io/rook/ceph:v1.15.0
gcr.io/k8s-staging-sig-storage/objectstorage-sidecar:v20240513-v0.1.0-35-gefb3255
quay.io/ceph/ceph:v18.2.4
quay.io/ceph/cosi:v0.1.2
- quay.io/cephcsi/cephcsi:v3.11.0
- quay.io/csiaddons/k8s-sidecar:v0.8.0
- registry.k8s.io/sig-storage/csi-attacher:v4.5.1
- registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
- registry.k8s.io/sig-storage/csi-provisioner:v4.0.1
- registry.k8s.io/sig-storage/csi-resizer:v1.10.1
- registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2
- rook/ceph:master
+ quay.io/cephcsi/cephcsi:v3.12.0
+ quay.io/csiaddons/k8s-sidecar:v0.9.0
+ registry.k8s.io/sig-storage/csi-attacher:v4.6.1
+ registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1
+ registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
+ registry.k8s.io/sig-storage/csi-resizer:v1.11.1
+ registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 20a6c9bc186a..e8b381d4d8d6 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -197,12 +197,12 @@ data:
# The default version of CSI supported by Rook will be started. To change the version
# of the CSI driver to something other than what is officially supported, change
# these images to the desired release of the CSI driver.
- # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.11.0"
- # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1"
- # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.10.1"
- # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v4.0.1"
- # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2"
- # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.5.1"
+ # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.0"
+ # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
+ # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
+ # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
+ # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1"
+ # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
# (Optional) set user created priorityclassName for csi plugin pods.
CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
@@ -558,7 +558,7 @@ data:
CSI_ENABLE_CSIADDONS: "false"
# Enable watch for faster recovery from rbd rwo node loss
ROOK_WATCH_FOR_NODE_FAILURE: "true"
- # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
+ # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.0"
# The GCSI RPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
CSI_GRPC_TIMEOUT_SECONDS: "150"
@@ -673,7 +673,7 @@ spec:
serviceAccountName: rook-ceph-system
containers:
- name: rook-ceph-operator
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
args: ["ceph", "operator"]
securityContext:
runAsNonRoot: true
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 0df475e6fc00..2cd6fea8d990 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -25,9 +25,14 @@ data:
# The logging level for the operator: ERROR | WARNING | INFO | DEBUG
ROOK_LOG_LEVEL: "INFO"
+ # The address for the operator's controller-runtime metrics. 0 is disabled. :8080 serves metrics on port 8080.
+ ROOK_OPERATOR_METRICS_BIND_ADDRESS: "0"
+
# Allow using loop devices for osds in test clusters.
ROOK_CEPH_ALLOW_LOOP_DEVICES: "false"
+ # Enable CSI Operator
+ ROOK_USE_CSI_OPERATOR: "false"
# Enable the CSI driver.
# To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
ROOK_CSI_ENABLE_CEPHFS: "true"
@@ -122,12 +127,12 @@ data:
# The default version of CSI supported by Rook will be started. To change the version
# of the CSI driver to something other than what is officially supported, change
# these images to the desired release of the CSI driver.
- # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.11.0"
- # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1"
- # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.10.1"
- # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v4.0.1"
- # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2"
- # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.5.1"
+ # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.12.0"
+ # ROOK_CSI_REGISTRAR_IMAGE: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
+ # ROOK_CSI_RESIZER_IMAGE: "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
+ # ROOK_CSI_PROVISIONER_IMAGE: "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
+ # ROOK_CSI_SNAPSHOTTER_IMAGE: "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1"
+ # ROOK_CSI_ATTACHER_IMAGE: "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
# To indicate the image pull policy to be applied to all the containers in the csi driver pods.
# ROOK_CSI_IMAGE_PULL_POLICY: "IfNotPresent"
@@ -503,7 +508,7 @@ data:
CSI_ENABLE_CSIADDONS: "false"
# Enable watch for faster recovery from rbd rwo node loss
ROOK_WATCH_FOR_NODE_FAILURE: "true"
- # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
+ # ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.9.0"
# The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
CSI_GRPC_TIMEOUT_SECONDS: "150"
@@ -597,7 +602,7 @@ spec:
serviceAccountName: rook-ceph-system
containers:
- name: rook-ceph-operator
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
args: ["ceph", "operator"]
securityContext:
runAsNonRoot: true
diff --git a/deploy/examples/osd-purge.yaml b/deploy/examples/osd-purge.yaml
index f7915180dca7..c6d901de696f 100644
--- a/deploy/examples/osd-purge.yaml
+++ b/deploy/examples/osd-purge.yaml
@@ -28,7 +28,7 @@ spec:
serviceAccountName: rook-ceph-purge-osd
containers:
- name: osd-removal
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
# TODO: Insert the OSD ID in the last parameter that is to be removed
# The OSD IDs are a comma-separated list. For example: "0" or "0,2".
# If you want to preserve the OSD PVCs, set `--preserve-pvc true`.
diff --git a/deploy/examples/sqlitevfs-client.yaml b/deploy/examples/sqlitevfs-client.yaml
index a821bd2923f1..77f4613beba5 100644
--- a/deploy/examples/sqlitevfs-client.yaml
+++ b/deploy/examples/sqlitevfs-client.yaml
@@ -111,7 +111,7 @@ spec:
initContainers:
## Setup Ceph SQLite VFS
- name: setup
- image: bitnami/kubectl:1.21.11
+ image: docker.io/bitnami/kubectl:1.21.11
command:
- /bin/bash
- -c
diff --git a/deploy/examples/toolbox-job.yaml b/deploy/examples/toolbox-job.yaml
index 940cb98660f9..c9cd1eb291af 100644
--- a/deploy/examples/toolbox-job.yaml
+++ b/deploy/examples/toolbox-job.yaml
@@ -10,7 +10,7 @@ spec:
spec:
initContainers:
- name: config-init
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
command: ["/usr/local/bin/toolbox.sh"]
args: ["--skip-watch"]
imagePullPolicy: IfNotPresent
@@ -29,7 +29,7 @@ spec:
mountPath: /var/lib/rook-ceph-mon
containers:
- name: script
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
volumeMounts:
- mountPath: /etc/ceph
name: ceph-config
diff --git a/deploy/examples/toolbox-operator-image.yaml b/deploy/examples/toolbox-operator-image.yaml
index 4e733c17664f..24a8b2f86a43 100644
--- a/deploy/examples/toolbox-operator-image.yaml
+++ b/deploy/examples/toolbox-operator-image.yaml
@@ -25,7 +25,7 @@ spec:
serviceAccountName: rook-ceph-default
containers:
- name: rook-ceph-tools-operator-image
- image: rook/ceph:master
+ image: docker.io/rook/ceph:v1.15.0
command:
- /bin/bash
- -c
diff --git a/design/ceph/object/store.md b/design/ceph/object/store.md
index 2bfcf442a59b..4b6c23f0ca73 100644
--- a/design/ceph/object/store.md
+++ b/design/ceph/object/store.md
@@ -397,8 +397,19 @@ the HTTPS (`securePort`) endpoint. Because the advertised endpoint is primarily
resources internal to the Kubernetes cluster, this default should be sufficient for most users, and
this is the behavior expected by users when `dnsNames` is not configured, so it should be familiar.
-When this feature is enabled, there is also ambiguity about which endpoint Rook should use for Admin
-Ops API communication. Some users have reported issues with Rook using a `dnsNames` endpoint
+When this feature is enabled, there should be no ambiguity about which endpoint Rook will use for
+Admin Ops API communication. As an HTTP server, RGW is only able to return a single TLS certificate
+to S3 clients ([more detail](https://github.com/rook/rook/issues/14530)). For maximum compatibility
+while TLS is enabled, Rook should connect to the same endpoint that users do. Internally, Rook will
+use the advertise endpoint as configured.
+
+Rook documentation will inform users that if TLS is enabled, they must give Rook a certificate that
+accepts the service endpoint. Alternately, if that is not possible, Rook will add an
+`insecureSkipTlsVerification` option to the CephObjectStore to allow users to provision a healthy
+CephObjectStore. This opens users up to machine-in-the-middle attacks, so users should be advised to
+only use it for test/proof-of-concept clusters, or to work around bugs temporarily.
+
+Some users have reported issues with Rook using a `dnsNames` endpoint
(or `advertiseEndpoint`) when they wish to set up ingress certificates after Rook deployment. The
obvious alternative is to have Rook always use the CephObjectStore service, but other users have
expressed troubles creating certificates or CAs that allow the service endpoint in the past.
@@ -424,14 +435,6 @@ While Rook add endpoints to the list for safety and convenience, users might add
which Rook should not treat as a configuration bug. Rook should also ensure the list ordering is
consistent between reconciles.
-In order to attempt to strike the best balance for everyone, and to provide the best clarity for
-users and Rook internally, Rook will always use the service endpoint for admin ops. Rook
-documentation must inform users that if TLS is enabled, they must give Rook a certificate that
-accepts the service endpoint. Alternately, if that is not possible, Rook will add an
-`insecureSkipTlsVerification` option to the CephObjectStore to allow users to provision a healthy
-CephObjectStore. This opens users up to machine-in-the-middle attacks, so users should be advised to
-only use it for test/proof-of-concept clusters, or to work around bugs temporarily.
-
Rook can refer users to this Kubernetes doc for a suggested way that they can manage certificates
in a Kubernetes cluster that work with Kubernetes services like the CephObjectStore service:
https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
diff --git a/design/ceph/object/swift-and-keystone-integration.md b/design/ceph/object/swift-and-keystone-integration.md
index 27717ade5093..05fac374e65e 100644
--- a/design/ceph/object/swift-and-keystone-integration.md
+++ b/design/ceph/object/swift-and-keystone-integration.md
@@ -51,10 +51,13 @@ to further technologies (such as LDAP authentication).
create a separate `cephobjectstoreuser`, configure its access rights
to the bucket and use those credentials.
+* Support for Kubernetes Container Object Storage (COSI)
+
* Support for authentication technologies other than Keystone (e.g. LDAP)
* Exposing options that disable security features (e.g. TLS verification)
+
## Proposal details
The Object Store CRD will have to be extended to accommodate the new
@@ -80,11 +83,11 @@ Annotations:
options](https://docs.ceph.com/en/octopus/radosgw/config-ref/#keystone-settings),
the corresponding RGW option is formed by prefixing it with
`rgw_keystone_` and replacing upper case letters by their lower case
- letter followed by an underscore. E.g. `tokenCacheSize` maps to
+ letter preceded by an underscore. E.g. `tokenCacheSize` maps to
`rgw_keystone_token_cache_size`.
* `[2]` These settings are required in the `keystone` section if
present.
-* `[1]` The name of the secret containing the credentials for the
+* `[3]` The name of the secret containing the credentials for the
service user account used by RGW. It has to be in the same namespace
as the object store resource.
@@ -173,12 +176,12 @@ Annotations:
options](https://docs.ceph.com/en/octopus/radosgw/config-ref/#swift-settings),
the corresponding RGW option is formed by prefixing it with
`rgw_swift_` and replacing upper case letters by their lower case
- letter followed by an underscore. E.g. `urlPrefix` maps to
+ letter preceded by an underscore. E.g. `urlPrefix` maps to
`rgw_swift_url_prefix`. They are optional. If not given, the defaults
of the corresponding RGW option apply.
-The access to the Swift API is granted by creating a subuser of an RGW
-user. While commonly the access is granted via projects
+Access to the Swift API is granted by creating a subuser of an RGW
+user. While commonly access is granted via projects
mapped from Keystone, explicit creation of subusers is supported by
extending the `cephobjectstoreuser` resource with a new optional section
`spec.subUsers`:
diff --git a/go.mod b/go.mod
index b6b1c9aa1121..fabd403535db 100644
--- a/go.mod
+++ b/go.mod
@@ -18,9 +18,10 @@ require (
github.com/IBM/keyprotect-go-client v0.14.3
github.com/aws/aws-sdk-go v1.55.3
github.com/banzaicloud/k8s-objectmatcher v1.8.0
+ github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c
github.com/ceph/go-ceph v0.28.0
github.com/coreos/pkg v0.0.0-20230601102743-20bbbf26f4d8
- github.com/csi-addons/kubernetes-csi-addons v0.8.0
+ github.com/csi-addons/kubernetes-csi-addons v0.9.0
github.com/gemalto/kmip-go v0.0.10
github.com/go-ini/ini v1.67.0
github.com/google/go-cmp v0.6.0
@@ -33,24 +34,25 @@ require (
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2
github.com/prometheus-operator/prometheus-operator/pkg/client v0.75.2
github.com/rook/rook/pkg/apis v0.0.0-20231204200402-5287527732f7
+ github.com/sethvargo/go-password v0.2.0
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
github.com/sykesm/zap-logfmt v0.0.4
go.uber.org/automaxprocs v1.5.3
go.uber.org/zap v1.27.0
- golang.org/x/exp v0.0.0-20231127185646-65229373498e
- golang.org/x/sync v0.7.0
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+ golang.org/x/sync v0.8.0
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.30.3
- k8s.io/apiextensions-apiserver v0.30.3
- k8s.io/apimachinery v0.30.3
+ k8s.io/api v0.31.0
+ k8s.io/apiextensions-apiserver v0.31.0
+ k8s.io/apimachinery v0.31.0
k8s.io/cli-runtime v0.30.3
- k8s.io/client-go v0.30.3
+ k8s.io/client-go v0.31.0
k8s.io/cloud-provider v0.30.3
- k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0
- sigs.k8s.io/controller-runtime v0.18.4
+ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ sigs.k8s.io/controller-runtime v0.19.0
sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/yaml v1.4.0
)
@@ -63,11 +65,14 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
+ github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/portworx/sched-ops v1.20.4-rc1 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
)
require (
@@ -78,7 +83,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/ceph/ceph-csi/api v0.0.0-20231227104434-06f9a98b7a83
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containernetworking/cni v1.2.0-rc1 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -122,11 +127,10 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
- github.com/moby/spdystream v0.2.0 // indirect
+ github.com/moby/spdystream v0.4.0 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -136,22 +140,22 @@ require (
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.18.0 // indirect
- github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.45.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/client_golang v1.19.1 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.24.0 // indirect
- golang.org/x/net v0.26.0 // indirect
+ golang.org/x/crypto v0.26.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
- golang.org/x/term v0.21.0 // indirect
- golang.org/x/text v0.16.0 // indirect
+ golang.org/x/sys v0.23.0 // indirect
+ golang.org/x/term v0.23.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
diff --git a/go.sum b/go.sum
index 0716004f7e2c..cf456c479866 100644
--- a/go.sum
+++ b/go.sum
@@ -162,14 +162,16 @@ github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c h1:JOhwt7+iM18pm9s9zAhAKGRJm615AdIaKklbUd7Z8So=
+github.com/ceph/ceph-csi-operator/api v0.0.0-20240819112305-88e6db254d6c/go.mod h1:odEUoarG26wXBCC2l4O4nMWhAz6VTKr2FRkv9yELgi8=
github.com/ceph/ceph-csi/api v0.0.0-20231227104434-06f9a98b7a83 h1:xWhLO5MR+diAsZoOcPe0zVe+JcJrqMaVbScShye6pXw=
github.com/ceph/ceph-csi/api v0.0.0-20231227104434-06f9a98b7a83/go.mod h1:ZSvtS90FCB/becFi/rjy85sSw1igchaWZfUigxN9FxY=
github.com/ceph/go-ceph v0.28.0 h1:ZjlDV9XiVmBQIe9bKbT5j2Ft/bse3Jm+Ui65yE/oFFU=
github.com/ceph/go-ceph v0.28.0/go.mod h1:EwEITEDpuFCMnFrPLbV+/Vyi59jUihgCxBKvlTWGot0=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -209,8 +211,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/csi-addons/kubernetes-csi-addons v0.8.0 h1:zvYGp4DM6KdQzEX3dQSYKykqJdLZlxpVBJjtpbaqFjs=
-github.com/csi-addons/kubernetes-csi-addons v0.8.0/go.mod h1:dvinzoiXlqdOGDpKkYx8Jxl507BzVEEEO+SI0OmBaRI=
+github.com/csi-addons/kubernetes-csi-addons v0.9.0 h1:Hhb44WcrxtbzmpLY+uqX+DBWCI6HgA/rwQMPyvsyCc8=
+github.com/csi-addons/kubernetes-csi-addons v0.9.0/go.mod h1:/YROZDdEi1N/1Ls9rdU5W2VNjm8MK7HHApl8W4Sqt9s=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -263,6 +265,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gemalto/flume v0.13.1 h1:wB9T4HP3D+3FRTymi8BzdDHkdTY8UbzH2eVSfYHmLxQ=
github.com/gemalto/flume v0.13.1/go.mod h1:CCm9802zdB4Sy7Jx8dpHaFJjd4fF/nVfCIWBS4f8k9g=
github.com/gemalto/kmip-go v0.0.10 h1:jAAZejUdRrspKigLoA62MTmIj0T7DDDOzdxHi1cDjoU=
@@ -457,8 +461,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
+github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -651,8 +655,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -669,8 +671,9 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
+github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
@@ -715,8 +718,8 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
-github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
-github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
+github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -731,8 +734,8 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
+github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 h1:+S998xHiJApsJZjRAO8wyedU9GfqFd8mtwWly6LqHDo=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
@@ -780,21 +783,21 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -802,8 +805,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -826,6 +829,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI=
+github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -887,6 +892,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
@@ -965,8 +972,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -977,8 +984,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No=
-golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1079,8 +1086,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1117,8 +1124,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1219,8 +1226,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1233,8 +1240,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1252,8 +1259,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1338,8 +1345,8 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1532,6 +1539,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v5 v5.7.0 h1:dGKGylPlZ/jus2g1YqhhyzfH0gPy2R8/MYUpW/OslTY=
gopkg.in/evanphx/json-patch.v5 v5.7.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1587,15 +1596,15 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
-k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
-k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
+k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
+k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
k8s.io/apiextensions-apiserver v0.18.4/go.mod h1:NYeyeYq4SIpFlPxSAB6jHPIdvu3hL0pc36wuRChybio=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
-k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U=
-k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4=
+k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk=
+k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk=
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
@@ -1607,8 +1616,8 @@ k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
-k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
+k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8=
@@ -1623,8 +1632,8 @@ k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
-k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
+k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
+k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/cloud-provider v0.30.3 h1:SNWZmllTymOTzIPJuhtZH6il/qVi75dQARRQAm9k6VY=
k8s.io/cloud-provider v0.30.3/go.mod h1:Ax0AVdHnM7tMYnJH1Ycy4SMBD98+4zA+tboUR9eYsY8=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
@@ -1672,8 +1681,8 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
@@ -1681,8 +1690,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A=
-sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw=
-sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
+sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
+sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
diff --git a/pkg/apis/ceph.rook.io/v1/annotations.go b/pkg/apis/ceph.rook.io/v1/annotations.go
index b2d77dc2d06a..9610420be28c 100644
--- a/pkg/apis/ceph.rook.io/v1/annotations.go
+++ b/pkg/apis/ceph.rook.io/v1/annotations.go
@@ -72,6 +72,11 @@ func GetCephExporterAnnotations(a AnnotationsSpec) Annotations {
return mergeAllAnnotationsWithKey(a, KeyCephExporter)
}
+// GetCmdReporterAnnotations returns the Annotations for jobs detecting versions
+func GetCmdReporterAnnotations(a AnnotationsSpec) Annotations {
+ return mergeAllAnnotationsWithKey(a, KeyCmdReporter)
+}
+
func GetClusterMetadataAnnotations(a AnnotationsSpec) Annotations {
return a[KeyClusterMetadata]
}
diff --git a/pkg/apis/ceph.rook.io/v1/annotations_test.go b/pkg/apis/ceph.rook.io/v1/annotations_test.go
index e3012a482d54..b832690c3567 100644
--- a/pkg/apis/ceph.rook.io/v1/annotations_test.go
+++ b/pkg/apis/ceph.rook.io/v1/annotations_test.go
@@ -58,8 +58,9 @@ func TestCephAnnotationsMerge(t *testing.T) {
// Merge with "all"
testAnnotations = AnnotationsSpec{
- "all": {"allkey1": "allval1", "allkey2": "allval2"},
- "mgr": {"mgrkey": "mgrval"},
+ "all": {"allkey1": "allval1", "allkey2": "allval2"},
+ "mgr": {"mgrkey": "mgrval"},
+ "cmdreporter": {"myversions": "detect"},
}
a = GetMonAnnotations(testAnnotations)
assert.Equal(t, "allval1", a["allkey1"])
@@ -70,6 +71,10 @@ func TestCephAnnotationsMerge(t *testing.T) {
assert.Equal(t, "allval1", a["allkey1"])
assert.Equal(t, "allval2", a["allkey2"])
assert.Equal(t, 3, len(a))
+ b := GetCmdReporterAnnotations(testAnnotations)
+ assert.Equal(t, "detect", b["myversions"])
+ assert.Equal(t, "allval1", b["allkey1"])
+ assert.Equal(t, "allval2", b["allkey2"])
}
func TestAnnotationsSpec(t *testing.T) {
diff --git a/pkg/apis/ceph.rook.io/v1/keys.go b/pkg/apis/ceph.rook.io/v1/keys.go
index 05395f1e3423..88467d31f3d5 100644
--- a/pkg/apis/ceph.rook.io/v1/keys.go
+++ b/pkg/apis/ceph.rook.io/v1/keys.go
@@ -32,4 +32,5 @@ const (
KeyCrashCollector KeyType = "crashcollector"
KeyClusterMetadata KeyType = "clusterMetadata"
KeyCephExporter KeyType = "exporter"
+ KeyCmdReporter KeyType = "cmdreporter"
)
diff --git a/pkg/apis/ceph.rook.io/v1/labels.go b/pkg/apis/ceph.rook.io/v1/labels.go
index ad4bb14660d4..6825838f8354 100644
--- a/pkg/apis/ceph.rook.io/v1/labels.go
+++ b/pkg/apis/ceph.rook.io/v1/labels.go
@@ -87,6 +87,10 @@ func GetCephExporterLabels(a LabelsSpec) Labels {
return mergeAllLabelsWithKey(a, KeyCephExporter)
}
+func GetCmdReporterLabels(a LabelsSpec) Labels {
+ return mergeAllLabelsWithKey(a, KeyCmdReporter)
+}
+
func mergeAllLabelsWithKey(a LabelsSpec, name KeyType) Labels {
all := a.All()
if all != nil {
diff --git a/pkg/apis/ceph.rook.io/v1/labels_test.go b/pkg/apis/ceph.rook.io/v1/labels_test.go
index c4810045310e..86668fb25f66 100644
--- a/pkg/apis/ceph.rook.io/v1/labels_test.go
+++ b/pkg/apis/ceph.rook.io/v1/labels_test.go
@@ -58,8 +58,9 @@ func TestCephLabelsMerge(t *testing.T) {
// Merge with "all"
testLabels = LabelsSpec{
- "all": {"allkey1": "allval1", "allkey2": "allval2"},
- "mgr": {"mgrkey": "mgrval"},
+ "all": {"allkey1": "allval1", "allkey2": "allval2"},
+ "mgr": {"mgrkey": "mgrval"},
+ "cmdreporter": {"detect": "myversion"},
}
a = GetMonLabels(testLabels)
assert.Equal(t, "allval1", a["allkey1"])
@@ -70,6 +71,11 @@ func TestCephLabelsMerge(t *testing.T) {
assert.Equal(t, "allval1", a["allkey1"])
assert.Equal(t, "allval2", a["allkey2"])
assert.Equal(t, 3, len(a))
+ a = GetCmdReporterLabels(testLabels)
+ assert.Equal(t, "myversion", a["detect"])
+ assert.Equal(t, "allval1", a["allkey1"])
+ assert.Equal(t, "allval2", a["allkey2"])
+ assert.Equal(t, 3, len(a))
}
func TestLabelsSpec(t *testing.T) {
diff --git a/pkg/apis/ceph.rook.io/v1/object.go b/pkg/apis/ceph.rook.io/v1/object.go
index 8c245cdbf6ea..9aee85e9dd12 100644
--- a/pkg/apis/ceph.rook.io/v1/object.go
+++ b/pkg/apis/ceph.rook.io/v1/object.go
@@ -17,7 +17,10 @@ limitations under the License.
package v1
import (
+ "fmt"
+
"github.com/pkg/errors"
+ "k8s.io/apimachinery/pkg/util/validation"
)
const ServiceServingCertKey = "service.beta.openshift.io/serving-cert-secret-name"
@@ -88,6 +91,33 @@ func ValidateObjectSpec(gs *CephObjectStore) error {
if gs.Spec.Gateway.Port <= 0 && gs.Spec.Gateway.SecurePort <= 0 {
return errors.New("invalid create: either of port or securePort fields should be not be zero")
}
+
+ // check hosting spec
+ if gs.Spec.Hosting != nil {
+ if gs.Spec.Hosting.AdvertiseEndpoint != nil {
+ ep := gs.Spec.Hosting.AdvertiseEndpoint
+ errList := validation.IsDNS1123Subdomain(ep.DnsName)
+ if len(errList) > 0 {
+ return errors.Errorf("hosting.advertiseEndpoint.dnsName %q must be a valid DNS-1123 subdomain: %v", ep.DnsName, errList)
+ }
+ if ep.Port < 1 || ep.Port > 65535 {
+ return errors.Errorf("hosting.advertiseEndpoint.port %d must be between 1 and 65535", ep.Port)
+ }
+ }
+ dnsNameErrs := []string{}
+ for _, dnsName := range gs.Spec.Hosting.DNSNames {
+ errs := validation.IsDNS1123Subdomain(dnsName)
+ if len(errs) > 0 {
+ // errors do not report the domains that are errored; add them to help users
+ errs = append(errs, fmt.Sprintf("error on dns name %q", dnsName))
+ dnsNameErrs = append(dnsNameErrs, errs...)
+ }
+ }
+ if len(dnsNameErrs) > 0 {
+ return errors.Errorf("one or more hosting.dnsNames is not a valid DNS-1123 subdomain: %v", dnsNameErrs)
+ }
+ }
+
return nil
}
@@ -98,6 +128,63 @@ func (s *ObjectStoreSpec) GetServiceServingCert() string {
return ""
}
+// GetServiceName gets the name of the Rook-created CephObjectStore service.
+// This method helps ensure adherence to stable, documented behavior (API).
+func (c *CephObjectStore) GetServiceName() string {
+ return "rook-ceph-rgw-" + c.GetName()
+}
+
+// GetServiceDomainName gets the domain name of the Rook-created CephObjectStore service.
+// This method helps ensure adherence to stable, documented behavior (API).
+func (c *CephObjectStore) GetServiceDomainName() string {
+ return fmt.Sprintf("%s.%s.svc", c.GetServiceName(), c.GetNamespace())
+}
+
+func (c *CephObjectStore) AdvertiseEndpointIsSet() bool {
+ return c.Spec.Hosting != nil && c.Spec.Hosting.AdvertiseEndpoint != nil &&
+ c.Spec.Hosting.AdvertiseEndpoint.DnsName != "" && c.Spec.Hosting.AdvertiseEndpoint.Port != 0
+}
+
+// GetAdvertiseEndpoint returns address, port, and isTls information about the advertised endpoint
+// for the CephObjectStore. This method helps ensure adherence to stable, documented behavior (API).
+func (c *CephObjectStore) GetAdvertiseEndpoint() (string, int32, bool, error) {
+ port, err := c.Spec.GetPort()
+ if err != nil {
+ return "", 0, false, err
+ }
+ isTls := c.Spec.IsTLSEnabled()
+
+ address := c.GetServiceDomainName() // service domain name is the default advertise address
+ if c.Spec.IsExternal() {
+ // for external clusters, the first external RGW endpoint is the default advertise address
+ address = c.Spec.Gateway.ExternalRgwEndpoints[0].String()
+ }
+
+ // if users override the advertise endpoint themselves, these value take priority
+ if c.AdvertiseEndpointIsSet() {
+ address = c.Spec.Hosting.AdvertiseEndpoint.DnsName
+ port = c.Spec.Hosting.AdvertiseEndpoint.Port
+ isTls = c.Spec.Hosting.AdvertiseEndpoint.UseTls
+ }
+
+ return address, port, isTls, nil
+}
+
+// GetAdvertiseEndpointUrl gets the fully-formed advertised endpoint URL for the CephObjectStore.
+// This method helps ensure adherence to stable, documented behavior (API).
+func (c *CephObjectStore) GetAdvertiseEndpointUrl() (string, error) {
+ address, port, isTls, err := c.GetAdvertiseEndpoint()
+ if err != nil {
+ return "", err
+ }
+
+ protocol := "http"
+ if isTls {
+ protocol = "https"
+ }
+ return fmt.Sprintf("%s://%s:%d", protocol, address, port), nil
+}
+
func (c *CephObjectStore) GetStatusConditions() *[]Condition {
return &c.Status.Conditions
}
diff --git a/pkg/apis/ceph.rook.io/v1/object_test.go b/pkg/apis/ceph.rook.io/v1/object_test.go
index 134c86ba5e0a..47e3fe803273 100644
--- a/pkg/apis/ceph.rook.io/v1/object_test.go
+++ b/pkg/apis/ceph.rook.io/v1/object_test.go
@@ -58,6 +58,75 @@ func TestValidateObjectStoreSpec(t *testing.T) {
o.ObjectMeta.Namespace = ""
err = ValidateObjectSpec(o)
assert.Error(t, err)
+
+ t.Run("hosting", func(t *testing.T) {
+ o := &CephObjectStore{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-store",
+ Namespace: "rook-ceph",
+ },
+ Spec: ObjectStoreSpec{
+ Gateway: GatewaySpec{
+ Port: 1,
+ SecurePort: 0,
+ },
+ Hosting: &ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &ObjectEndpointSpec{
+ DnsName: "valid.dns.addr",
+ Port: 1,
+ },
+ DNSNames: []string{"valid.dns.addr", "valid.dns.com"},
+ },
+ },
+ }
+ err := ValidateObjectSpec(o)
+ assert.NoError(t, err)
+
+ // wildcard advertise dns name
+ s := o.DeepCopy()
+ s.Spec.Hosting.AdvertiseEndpoint.DnsName = "*.invalid.dns.addr"
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, `"*.invalid.dns.addr"`)
+
+ // empty advertise dns name
+ s = o.DeepCopy()
+ s.Spec.Hosting.AdvertiseEndpoint.DnsName = ""
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, `""`)
+
+ // zero port
+ s = o.DeepCopy()
+ s.Spec.Hosting.AdvertiseEndpoint.Port = 0
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, "0")
+
+ // 65536 port
+ s = o.DeepCopy()
+ s.Spec.Hosting.AdvertiseEndpoint.Port = 65536
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, "65536")
+
+ // first dnsName invalid
+ s = o.DeepCopy()
+ s.Spec.Hosting.DNSNames = []string{"-invalid.dns.name", "accepted.dns.name"}
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, `"-invalid.dns.name"`)
+ assert.NotContains(t, err.Error(), "accepted.dns.name")
+
+ // second dnsName invalid
+ s = o.DeepCopy()
+ s.Spec.Hosting.DNSNames = []string{"accepted.dns.name", "-invalid.dns.name"}
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, `"-invalid.dns.name"`)
+ assert.NotContains(t, err.Error(), "accepted.dns.name")
+
+ // both dnsNames invalid
+ s = o.DeepCopy()
+ s.Spec.Hosting.DNSNames = []string{"*.invalid.dns.name", "-invalid.dns.name"}
+ err = ValidateObjectSpec(s)
+ assert.ErrorContains(t, err, `"-invalid.dns.name"`)
+ assert.ErrorContains(t, err, `"*.invalid.dns.name"`)
+ })
}
func TestIsTLSEnabled(t *testing.T) {
objStore := &CephObjectStore{
@@ -96,3 +165,201 @@ func TestIsTLSEnabled(t *testing.T) {
IsTLS = objStore.Spec.IsTLSEnabled()
assert.False(t, IsTLS)
}
+
+func TestCephObjectStore_GetAdvertiseEndpointUrl(t *testing.T) {
+ emptySpec := func() *CephObjectStore {
+ return &CephObjectStore{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-store",
+ Namespace: "my-ns",
+ },
+ }
+ }
+
+ httpSpec := func() *CephObjectStore {
+ s := emptySpec()
+ s.Spec.Gateway.Port = 8080
+ return s
+ }
+
+ httpsSpec := func() *CephObjectStore {
+ s := emptySpec()
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+ return s
+ }
+
+ dualSpec := func() *CephObjectStore {
+ s := emptySpec()
+ s.Spec.Gateway.Port = 8080
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+ return s
+ }
+
+ removeCert := func(s *CephObjectStore) *CephObjectStore {
+ s.Spec.Gateway.SSLCertificateRef = ""
+ return s
+ }
+
+ initHosting := func(s *CephObjectStore) *CephObjectStore {
+ if s.Spec.Hosting == nil {
+ s.Spec.Hosting = &ObjectStoreHostingSpec{}
+ }
+ return s
+ }
+
+ addExternalIPs := func(s *CephObjectStore) *CephObjectStore {
+ s.Spec.Gateway.ExternalRgwEndpoints = []EndpointAddress{
+ {IP: "192.168.1.1"},
+ {IP: "192.168.1.2"},
+ }
+ return s
+ }
+
+ addExternalHostnames := func(s *CephObjectStore) *CephObjectStore {
+ s.Spec.Gateway.ExternalRgwEndpoints = []EndpointAddress{
+ {Hostname: "s3.external.com"},
+ {Hostname: "s3.other.com"},
+ }
+ return s
+ }
+
+ addNilAdvertise := func(s *CephObjectStore) *CephObjectStore {
+ s = initHosting(s)
+ s.Spec.Hosting.AdvertiseEndpoint = nil
+ return s
+ }
+
+ addAdvertiseHttp := func(s *CephObjectStore) *CephObjectStore {
+ s = initHosting(s)
+ s.Spec.Hosting.AdvertiseEndpoint = &ObjectEndpointSpec{
+ DnsName: "my-endpoint.com",
+ Port: 80,
+ UseTls: false,
+ }
+ return s
+ }
+
+ addAdvertiseHttps := func(s *CephObjectStore) *CephObjectStore {
+ s = initHosting(s)
+ s.Spec.Hosting.AdvertiseEndpoint = &ObjectEndpointSpec{
+ DnsName: "my-endpoint.com",
+ Port: 443,
+ UseTls: true,
+ }
+ return s
+ }
+
+ type test struct {
+ name string
+ store *CephObjectStore
+ want string
+ wantErrContain string
+ }
+
+ // base level tests, internal mode
+ tests := []test{
+ {"nil hosting : internal : empty ", emptySpec(), "", "Port"},
+ {"nil hosting : internal : port ", httpSpec(), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""},
+ {"nil hosting : internal : securePort ", httpsSpec(), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""},
+ {"nil hosting : internal : port + securePort ", dualSpec(), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""},
+ {"nil hosting : internal : securePort, no cert ", removeCert(httpsSpec()), "", "Port"},
+ {"nil hosting : internal : port + securePort, no cert", removeCert(dualSpec()), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""},
+ {"nil hosting : external IPs : empty ", addExternalIPs(emptySpec()), "", "Port"},
+ {"nil hosting : external IPs : port ", addExternalIPs(httpSpec()), "http://192.168.1.1:8080", ""},
+ {"nil hosting : external IPs : securePort ", addExternalIPs(httpsSpec()), "https://192.168.1.1:8443", ""},
+ {"nil hosting : external IPs : port + securePort ", addExternalIPs(dualSpec()), "https://192.168.1.1:8443", ""},
+ {"nil hosting : external IPs : securePort, no cert ", addExternalIPs(removeCert(httpsSpec())), "", "Port"},
+ {"nil hosting : external IPs : port + securePort, no cert", addExternalIPs(removeCert(dualSpec())), "http://192.168.1.1:8080", ""},
+ {"nil hosting : external Hostnames: empty ", addExternalHostnames(emptySpec()), "", "Port"},
+ {"nil hosting : external Hostnames: port ", addExternalHostnames(httpSpec()), "http://s3.external.com:8080", ""},
+ {"nil hosting : external Hostnames: securePort ", addExternalHostnames(httpsSpec()), "https://s3.external.com:8443", ""},
+ {"nil hosting : external Hostnames: port + securePort ", addExternalHostnames(dualSpec()), "https://s3.external.com:8443", ""},
+ {"nil hosting : external Hostnames: securePort, no cert ", addExternalHostnames(removeCert(httpsSpec())), "", "Port"},
+ {"nil hosting : external Hostnames: port + securePort, no cert", addExternalHostnames(removeCert(dualSpec())), "http://s3.external.com:8080", ""},
+
+ {"nil advertise : internal : empty ", addNilAdvertise(emptySpec()), "", "Port"},
+ {"nil advertise : internal : port ", addNilAdvertise(httpSpec()), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""},
+ {"nil advertise : internal : securePort ", addNilAdvertise(httpsSpec()), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""},
+ {"nil advertise : internal : port + securePort ", addNilAdvertise(dualSpec()), "https://rook-ceph-rgw-my-store.my-ns.svc:8443", ""},
+ {"nil advertise : internal : securePort, no cert ", addNilAdvertise(removeCert(httpsSpec())), "", "Port"},
+ {"nil advertise : internal : port + securePort, no cert", addNilAdvertise(removeCert(dualSpec())), "http://rook-ceph-rgw-my-store.my-ns.svc:8080", ""},
+ {"nil advertise : external IPs : empty ", addNilAdvertise(addExternalIPs(emptySpec())), "", "Port"},
+ {"nil advertise : external IPs : port ", addNilAdvertise(addExternalIPs(httpSpec())), "http://192.168.1.1:8080", ""},
+ {"nil advertise : external IPs : securePort ", addNilAdvertise(addExternalIPs(httpsSpec())), "https://192.168.1.1:8443", ""},
+ {"nil advertise : external IPs : port + securePort ", addNilAdvertise(addExternalIPs(dualSpec())), "https://192.168.1.1:8443", ""},
+ {"nil advertise : external IPs : securePort, no cert ", addNilAdvertise(addExternalIPs(removeCert(httpsSpec()))), "", "Port"},
+ {"nil advertise : external IPs : port + securePort, no cert", addNilAdvertise(addExternalIPs(removeCert(dualSpec()))), "http://192.168.1.1:8080", ""},
+ {"nil advertise : external Hostnames: empty ", addNilAdvertise(addExternalHostnames(emptySpec())), "", "Port"},
+ {"nil advertise : external Hostnames: port ", addNilAdvertise(addExternalHostnames(httpSpec())), "http://s3.external.com:8080", ""},
+ {"nil advertise : external Hostnames: securePort ", addNilAdvertise(addExternalHostnames(httpsSpec())), "https://s3.external.com:8443", ""},
+ {"nil advertise : external Hostnames: port + securePort ", addNilAdvertise(addExternalHostnames(dualSpec())), "https://s3.external.com:8443", ""},
+ {"nil advertise : external Hostnames: securePort, no cert ", addNilAdvertise(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"},
+ {"nil advertise : external Hostnames: port + securePort, no cert", addNilAdvertise(addExternalHostnames(removeCert(dualSpec()))), "http://s3.external.com:8080", ""},
+
+ {"HTTP advertise : internal : empty ", addAdvertiseHttp(emptySpec()), "", "Port"},
+ {"HTTP advertise : internal : port ", addAdvertiseHttp(httpSpec()), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : internal : securePort ", addAdvertiseHttp(httpsSpec()), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : internal : port + securePort ", addAdvertiseHttp(dualSpec()), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : internal : securePort, no cert ", addAdvertiseHttp(removeCert(httpsSpec())), "", "Port"},
+ {"HTTP advertise : internal : port + securePort, no cert", addAdvertiseHttp(removeCert(dualSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external IPs : empty ", addAdvertiseHttp(addExternalIPs(emptySpec())), "", "Port"},
+ {"HTTP advertise : external IPs : port ", addAdvertiseHttp(addExternalIPs(httpSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external IPs : securePort ", addAdvertiseHttp(addExternalIPs(httpsSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external IPs : port + securePort ", addAdvertiseHttp(addExternalIPs(dualSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external IPs : securePort, no cert ", addAdvertiseHttp(addExternalIPs(removeCert(httpsSpec()))), "", "Port"},
+ {"HTTP advertise : external IPs : port + securePort, no cert", addAdvertiseHttp(addExternalIPs(removeCert(dualSpec()))), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external Hostnames: empty ", addAdvertiseHttp(addExternalHostnames(emptySpec())), "", "Port"},
+ {"HTTP advertise : external Hostnames: port ", addAdvertiseHttp(addExternalHostnames(httpSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external Hostnames: securePort ", addAdvertiseHttp(addExternalHostnames(httpsSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external Hostnames: port + securePort ", addAdvertiseHttp(addExternalHostnames(dualSpec())), "http://my-endpoint.com:80", ""},
+ {"HTTP advertise : external Hostnames: securePort, no cert ", addAdvertiseHttp(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"},
+ {"HTTP advertise : external Hostnames: port + securePort, no cert", addAdvertiseHttp(addExternalHostnames(removeCert(dualSpec()))), "http://my-endpoint.com:80", ""},
+
+ {"HTTPS advertise: internal : empty ", addAdvertiseHttps(emptySpec()), "", "Port"},
+ {"HTTPS advertise: internal : port ", addAdvertiseHttps(httpSpec()), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: internal : securePort ", addAdvertiseHttps(httpsSpec()), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: internal : port + securePort ", addAdvertiseHttps(dualSpec()), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: internal : securePort, no cert ", addAdvertiseHttps(removeCert(httpsSpec())), "", "Port"},
+ {"HTTPS advertise: internal : port + securePort, no cert", addAdvertiseHttps(removeCert(dualSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external IPs : empty ", addAdvertiseHttps(addExternalIPs(emptySpec())), "", "Port"},
+ {"HTTPS advertise: external IPs : port ", addAdvertiseHttps(addExternalIPs(httpSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external IPs : securePort ", addAdvertiseHttps(addExternalIPs(httpsSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external IPs : port + securePort ", addAdvertiseHttps(addExternalIPs(dualSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external IPs : securePort, no cert ", addAdvertiseHttps(addExternalIPs(removeCert(httpsSpec()))), "", "Port"},
+ {"HTTPS advertise: external IPs : port + securePort, no cert", addAdvertiseHttps(addExternalIPs(removeCert(dualSpec()))), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external Hostnames: empty ", addAdvertiseHttps(addExternalHostnames(emptySpec())), "", "Port"},
+ {"HTTPS advertise: external Hostnames: port ", addAdvertiseHttps(addExternalHostnames(httpSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external Hostnames: securePort ", addAdvertiseHttps(addExternalHostnames(httpsSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external Hostnames: port + securePort ", addAdvertiseHttps(addExternalHostnames(dualSpec())), "https://my-endpoint.com:443", ""},
+ {"HTTPS advertise: external Hostnames: securePort, no cert ", addAdvertiseHttps(addExternalHostnames(removeCert(httpsSpec()))), "", "Port"},
+ {"HTTPS advertise: external Hostnames: port + securePort, no cert", addAdvertiseHttps(addExternalHostnames(removeCert(dualSpec()))), "https://my-endpoint.com:443", ""},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.store.GetAdvertiseEndpointUrl()
+ assert.Equal(t, tt.want, got)
+ if tt.wantErrContain != "" {
+ assert.ErrorContains(t, err, tt.wantErrContain)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+
+ if tt.store.Spec.Hosting != nil {
+ t.Run("with DNS names: "+tt.name, func(t *testing.T) {
+ // dnsNames shouldn't change the test result at all
+ s := tt.store.DeepCopy()
+ s.Spec.Hosting.DNSNames = []string{"should.not.show.up"}
+ got, err := s.GetAdvertiseEndpointUrl()
+ assert.Equal(t, tt.want, got)
+ if tt.wantErrContain != "" {
+ assert.ErrorContains(t, err, tt.wantErrContain)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+ }
+}
diff --git a/pkg/apis/ceph.rook.io/v1/spec_test.go b/pkg/apis/ceph.rook.io/v1/spec_test.go
index 3bbbe6142989..88e95992cccc 100644
--- a/pkg/apis/ceph.rook.io/v1/spec_test.go
+++ b/pkg/apis/ceph.rook.io/v1/spec_test.go
@@ -92,3 +92,78 @@ storage:
assert.Equal(t, expectedSpec, clusterSpec)
}
+
+func newTrue() *bool {
+ t := true
+ return &t
+}
+
+func newFalse() *bool {
+ t := false
+ return &t
+}
+
+func newInt(val int) *int {
+ return &val
+}
+
+func newString(val string) *string {
+ return &val
+}
+
+func TestObjectStoreSpecMarshalSwiftAndKeystone(t *testing.T) {
+	// Assert that the new Swift and Keystone ObjectStoreSpec fields specified below are correctly parsed
+ specYaml := []byte(`
+auth:
+ keystone:
+ url: https://keystone:5000/
+ acceptedRoles: ["_member_", "service", "admin"]
+ implicitTenants: swift
+ tokenCacheSize: 1000
+ revocationInterval: 1200
+ serviceUserSecretName: rgw-service-user
+protocols:
+ swift:
+ accountInUrl: true
+ urlPrefix: /example
+ versioningEnabled: false
+ s3:
+ enabled: false
+ authUseKeystone: true
+`)
+ rawJSON, err := yaml.ToJSON(specYaml)
+ assert.Nil(t, err)
+ fmt.Printf("rawJSON: %s\n", string(rawJSON))
+
+ // unmarshal the JSON into a strongly typed storage spec object
+ var objectStoreSpec ObjectStoreSpec
+ err = json.Unmarshal(rawJSON, &objectStoreSpec)
+ assert.Nil(t, err)
+
+ // the unmarshalled storage spec should equal the expected spec below
+ expectedSpec := ObjectStoreSpec{
+ Auth: AuthSpec{
+ Keystone: &KeystoneSpec{
+ Url: "https://keystone:5000/",
+ AcceptedRoles: []string{"_member_", "service", "admin"},
+ ImplicitTenants: "swift",
+ TokenCacheSize: newInt(1000),
+ RevocationInterval: newInt(1200),
+ ServiceUserSecretName: "rgw-service-user",
+ },
+ },
+ Protocols: ProtocolSpec{
+ S3: &S3Spec{
+ Enabled: newFalse(),
+ AuthUseKeystone: newTrue(),
+ },
+ Swift: &SwiftSpec{
+ AccountInUrl: newTrue(),
+ UrlPrefix: newString("/example"),
+ VersioningEnabled: newFalse(),
+ },
+ },
+ }
+
+ assert.Equal(t, expectedSpec, objectStoreSpec)
+}
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go
index 3a1dfa212b76..12a756231c17 100755
--- a/pkg/apis/ceph.rook.io/v1/types.go
+++ b/pkg/apis/ceph.rook.io/v1/types.go
@@ -1473,6 +1473,14 @@ type ObjectStoreSpec struct {
// +nullable
Gateway GatewaySpec `json:"gateway"`
+ // The protocol specification
+ // +optional
+ Protocols ProtocolSpec `json:"protocols,omitempty"`
+
+ // The authentication configuration
+ // +optional
+ Auth AuthSpec `json:"auth,omitempty"`
+
// The multisite info
// +optional
// +nullable
@@ -1497,7 +1505,10 @@ type ObjectStoreSpec struct {
// +optional
AllowUsersInNamespaces []string `json:"allowUsersInNamespaces,omitempty"`
- // Hosting settings for the object store
+ // Hosting settings for the object store.
+ // A common use case for hosting configuration is to inform Rook of endpoints that support DNS
+ // wildcards, which in turn allows virtual host-style bucket addressing.
+ // +nullable
// +optional
Hosting *ObjectStoreHostingSpec `json:"hosting,omitempty"`
}
@@ -1641,6 +1652,86 @@ type EndpointAddress struct {
Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
}
+// ProtocolSpec represents a Ceph Object Store protocol specification
+type ProtocolSpec struct {
+ // The spec for S3
+ // +optional
+ // +nullable
+ S3 *S3Spec `json:"s3,omitempty"`
+
+ // The spec for Swift
+ // +optional
+ // +nullable
+ Swift *SwiftSpec `json:"swift"`
+}
+
+// S3Spec represents Ceph Object Store specification for the S3 API
+type S3Spec struct {
+ // Whether to enable S3. This defaults to true (even if protocols.s3 is not present in the CRD). This maintains backwards compatibility – by default S3 is enabled.
+ // +nullable
+ // +optional
+ Enabled *bool `json:"enabled,omitempty"`
+ // Whether to use Keystone for authentication. This option maps directly to the rgw_s3_auth_use_keystone option. Enabling it allows generating S3 credentials via an OpenStack API call, see the docs. If not given, the defaults of the corresponding RGW option apply.
+ // +nullable
+ // +optional
+ AuthUseKeystone *bool `json:"authUseKeystone,omitempty"`
+}
+
+// SwiftSpec represents Ceph Object Store specification for the Swift API
+type SwiftSpec struct {
+ // Whether or not the Swift account name should be included in the Swift API URL. If set to false (the default), then the Swift API will listen on a URL formed like http://host:port//v1. If set to true, the Swift API URL will be http://host:port//v1/AUTH_. You must set this option to true (and update the Keystone service catalog) if you want radosgw to support publicly-readable containers and temporary URLs.
+ // +nullable
+ // +optional
+ AccountInUrl *bool `json:"accountInUrl,omitempty"`
+ // The URL prefix for the Swift API, to distinguish it from the S3 API endpoint. The default is swift, which makes the Swift API available at the URL http://host:port/swift/v1 (or http://host:port/swift/v1/AUTH_%(tenant_id)s if rgw swift account in url is enabled).
+ // +nullable
+ // +optional
+ UrlPrefix *string `json:"urlPrefix,omitempty"`
+ // Enables the Object Versioning of OpenStack Object Storage API. This allows clients to put the X-Versions-Location attribute on containers that should be versioned.
+ // +nullable
+ // +optional
+ VersioningEnabled *bool `json:"versioningEnabled,omitempty"`
+}
+
+// AuthSpec represents the authentication protocol configuration of a Ceph Object Store Gateway
+type AuthSpec struct {
+ // The spec for Keystone
+ // +optional
+ // +nullable
+ Keystone *KeystoneSpec `json:"keystone,omitempty"`
+}
+
+// KeystoneSpec represents the Keystone authentication configuration of a Ceph Object Store Gateway
+type KeystoneSpec struct {
+ // The URL for the Keystone server.
+ Url string `json:"url"`
+ // The name of the secret containing the credentials for the service user account used by RGW. It has to be in the same namespace as the object store resource.
+ ServiceUserSecretName string `json:"serviceUserSecretName"`
+	// The roles required to serve requests.
+ AcceptedRoles []string `json:"acceptedRoles"`
+ // Create new users in their own tenants of the same name. Possible values are true, false, swift and s3. The latter have the effect of splitting the identity space such that only the indicated protocol will use implicit tenants.
+ // +optional
+ ImplicitTenants ImplicitTenantSetting `json:"implicitTenants,omitempty"`
+ // The maximum number of entries in each Keystone token cache.
+ // +optional
+ // +nullable
+ TokenCacheSize *int `json:"tokenCacheSize,omitempty"`
+ // The number of seconds between token revocation checks.
+ // +optional
+ // +nullable
+ RevocationInterval *int `json:"revocationInterval,omitempty"`
+}
+
+type ImplicitTenantSetting string
+
+const (
+ ImplicitTenantSwift ImplicitTenantSetting = "swift"
+ ImplicitTenantS3 ImplicitTenantSetting = "s3"
+ ImplicitTenantTrue ImplicitTenantSetting = "true"
+ ImplicitTenantFalse ImplicitTenantSetting = "false"
+ ImplicitTenantDefault ImplicitTenantSetting = ""
+)
+
// ZoneSpec represents a Ceph Object Store Gateway Zone specification
type ZoneSpec struct {
// RGW Zone the Object Store is in
@@ -1675,16 +1766,47 @@ type ObjectEndpoints struct {
// ObjectStoreHostingSpec represents the hosting settings for the object store
type ObjectStoreHostingSpec struct {
- // A list of DNS names in which bucket can be accessed via virtual host path. These names need to valid according RFC-1123.
- // Each domain requires wildcard support like ingress loadbalancer.
- // Do not include the wildcard itself in the list of hostnames (e.g. use "mystore.example.com" instead of "*.mystore.example.com").
- // Add all hostnames including user-created Kubernetes Service endpoints to the list.
- // CephObjectStore Service Endpoints and CephObjectZone customEndpoints are automatically added to the list.
+ // AdvertiseEndpoint is the default endpoint Rook will return for resources dependent on this
+ // object store. This endpoint will be returned to CephObjectStoreUsers, Object Bucket Claims,
+ // and COSI Buckets/Accesses.
+ // By default, Rook returns the endpoint for the object store's Kubernetes service using HTTPS
+ // with `gateway.securePort` if it is defined (otherwise, HTTP with `gateway.port`).
+ // +nullable
+ // +optional
+ AdvertiseEndpoint *ObjectEndpointSpec `json:"advertiseEndpoint,omitempty"`
+ // A list of DNS host names on which object store gateways will accept client S3 connections.
+ // When specified, object store gateways will reject client S3 connections to hostnames that are
+ // not present in this list, so include all endpoints.
+ // The object store's advertiseEndpoint and Kubernetes service endpoint, plus CephObjectZone
+ // `customEndpoints` are automatically added to the list but may be set here again if desired.
+	// Each DNS name must be valid according to RFC-1123.
+ // If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+ // wildcard itself in the list of hostnames.
+ // E.g., use "mystore.example.com" instead of "*.mystore.example.com".
// The feature is supported only for Ceph v18 and later versions.
// +optional
DNSNames []string `json:"dnsNames,omitempty"`
}
+// ObjectEndpointSpec represents an object store endpoint
+type ObjectEndpointSpec struct {
+ // DnsName is the DNS name (in RFC-1123 format) of the endpoint.
+ // If the DNS name corresponds to an endpoint with DNS wildcard support, do not include the
+ // wildcard itself in the list of hostnames.
+ // E.g., use "mystore.example.com" instead of "*.mystore.example.com".
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ DnsName string `json:"dnsName"`
+ // Port is the port on which S3 connections can be made for this endpoint.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +required
+ Port int32 `json:"port"`
+ // UseTls defines whether the endpoint uses TLS (HTTPS) or not (HTTP).
+ // +required
+ UseTls bool `json:"useTls"`
+}
+
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -2864,6 +2986,11 @@ type StorageScopeSpec struct {
// Whether to allow updating the device class after the OSD is initially provisioned
// +optional
AllowDeviceClassUpdate bool `json:"allowDeviceClassUpdate,omitempty"`
+ // Whether Rook will resize the OSD CRUSH weight when the OSD PVC size is increased.
+ // This allows cluster data to be rebalanced to make most effective use of new OSD space.
+ // The default is false since data rebalancing can cause temporary cluster slowdown.
+ // +optional
+ AllowOsdCrushWeightUpdate bool `json:"allowOsdCrushWeightUpdate,omitempty"`
}
// OSDStore is the backend storage type used for creating the OSDs
diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go
index 913fd8476794..fc927f6369d6 100644
--- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go
@@ -123,6 +123,27 @@ func (in AnnotationsSpec) DeepCopy() AnnotationsSpec {
return *out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthSpec) DeepCopyInto(out *AuthSpec) {
+ *out = *in
+ if in.Keystone != nil {
+ in, out := &in.Keystone, &out.Keystone
+ *out = new(KeystoneSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSpec.
+func (in *AuthSpec) DeepCopy() *AuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketNotificationSpec) DeepCopyInto(out *BucketNotificationSpec) {
*out = *in
@@ -2805,6 +2826,37 @@ func (in *KeyRotationSpec) DeepCopy() *KeyRotationSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeystoneSpec) DeepCopyInto(out *KeystoneSpec) {
+ *out = *in
+ if in.AcceptedRoles != nil {
+ in, out := &in.AcceptedRoles, &out.AcceptedRoles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.TokenCacheSize != nil {
+ in, out := &in.TokenCacheSize, &out.TokenCacheSize
+ *out = new(int)
+ **out = **in
+ }
+ if in.RevocationInterval != nil {
+ in, out := &in.RevocationInterval, &out.RevocationInterval
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneSpec.
+func (in *KeystoneSpec) DeepCopy() *KeystoneSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KeystoneSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Labels) DeepCopyInto(out *Labels) {
{
@@ -3442,6 +3494,22 @@ func (in *OSDStore) DeepCopy() *OSDStore {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectEndpointSpec) DeepCopyInto(out *ObjectEndpointSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectEndpointSpec.
+func (in *ObjectEndpointSpec) DeepCopy() *ObjectEndpointSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectEndpointSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectEndpoints) DeepCopyInto(out *ObjectEndpoints) {
*out = *in
@@ -3530,6 +3598,11 @@ func (in *ObjectSharedPoolsSpec) DeepCopy() *ObjectSharedPoolsSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObjectStoreHostingSpec) DeepCopyInto(out *ObjectStoreHostingSpec) {
*out = *in
+ if in.AdvertiseEndpoint != nil {
+ in, out := &in.AdvertiseEndpoint, &out.AdvertiseEndpoint
+ *out = new(ObjectEndpointSpec)
+ **out = **in
+ }
if in.DNSNames != nil {
in, out := &in.DNSNames, &out.DNSNames
*out = make([]string, len(*in))
@@ -3573,6 +3646,8 @@ func (in *ObjectStoreSpec) DeepCopyInto(out *ObjectStoreSpec) {
in.DataPool.DeepCopyInto(&out.DataPool)
out.SharedPools = in.SharedPools
in.Gateway.DeepCopyInto(&out.Gateway)
+ in.Protocols.DeepCopyInto(&out.Protocols)
+ in.Auth.DeepCopyInto(&out.Auth)
out.Zone = in.Zone
in.HealthCheck.DeepCopyInto(&out.HealthCheck)
if in.Security != nil {
@@ -4015,6 +4090,32 @@ func (in *ProbeSpec) DeepCopy() *ProbeSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProtocolSpec) DeepCopyInto(out *ProtocolSpec) {
+ *out = *in
+ if in.S3 != nil {
+ in, out := &in.S3, &out.S3
+ *out = new(S3Spec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Swift != nil {
+ in, out := &in.Swift, &out.Swift
+ *out = new(SwiftSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtocolSpec.
+func (in *ProtocolSpec) DeepCopy() *ProtocolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProtocolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PullSpec) DeepCopyInto(out *PullSpec) {
*out = *in
@@ -4182,6 +4283,32 @@ func (in ResourceSpec) DeepCopy() ResourceSpec {
return *out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3Spec) DeepCopyInto(out *S3Spec) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.AuthUseKeystone != nil {
+ in, out := &in.AuthUseKeystone, &out.AuthUseKeystone
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec.
+func (in *S3Spec) DeepCopy() *S3Spec {
+ if in == nil {
+ return nil
+ }
+ out := new(S3Spec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SSSDSidecar) DeepCopyInto(out *SSSDSidecar) {
*out = *in
@@ -4584,6 +4711,37 @@ func (in *StretchClusterSpec) DeepCopy() *StretchClusterSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SwiftSpec) DeepCopyInto(out *SwiftSpec) {
+ *out = *in
+ if in.AccountInUrl != nil {
+ in, out := &in.AccountInUrl, &out.AccountInUrl
+ *out = new(bool)
+ **out = **in
+ }
+ if in.UrlPrefix != nil {
+ in, out := &in.UrlPrefix, &out.UrlPrefix
+ *out = new(string)
+ **out = **in
+ }
+ if in.VersioningEnabled != nil {
+ in, out := &in.VersioningEnabled, &out.VersioningEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SwiftSpec.
+func (in *SwiftSpec) DeepCopy() *SwiftSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SwiftSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TopicEndpointSpec) DeepCopyInto(out *TopicEndpointSpec) {
*out = *in
diff --git a/pkg/apis/go.mod b/pkg/apis/go.mod
index 4c3f000279b2..b54c69562f77 100644
--- a/pkg/apis/go.mod
+++ b/pkg/apis/go.mod
@@ -21,18 +21,22 @@ require (
github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.9.0
- k8s.io/api v0.30.3
- k8s.io/apimachinery v0.30.3
+ k8s.io/api v0.31.0
+ k8s.io/apimachinery v0.31.0
)
require (
github.com/Masterminds/semver/v3 v3.2.1 // indirect
+ github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/rogpeppe/go-internal v1.12.0 // indirect
+ github.com/onsi/ginkgo/v2 v2.20.0 // indirect
+ github.com/onsi/gomega v1.34.1 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/client-go v0.30.3 // indirect
- k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
+ k8s.io/client-go v0.31.0 // indirect
+ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
@@ -41,7 +45,6 @@ require (
github.com/containernetworking/cni v1.2.0-rc1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
@@ -74,12 +77,12 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- golang.org/x/crypto v0.24.0 // indirect
- golang.org/x/net v0.26.0 // indirect
+ golang.org/x/crypto v0.26.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
- golang.org/x/term v0.21.0 // indirect
- golang.org/x/text v0.16.0 // indirect
+ golang.org/x/sys v0.23.0 // indirect
+ golang.org/x/term v0.23.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/pkg/apis/go.sum b/pkg/apis/go.sum
index 4ae9ba9ba0a9..6f1425394578 100644
--- a/pkg/apis/go.sum
+++ b/pkg/apis/go.sum
@@ -200,8 +200,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
-github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
@@ -211,6 +209,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -387,8 +387,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
+github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -609,8 +609,8 @@ github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8Ay
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
-github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
-github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
+github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -624,8 +624,8 @@ github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
+github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999 h1:+S998xHiJApsJZjRAO8wyedU9GfqFd8mtwWly6LqHDo=
github.com/openshift/api v0.0.0-20240301093301-ce10821dc999/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=
@@ -749,6 +749,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -805,8 +807,8 @@ golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -817,6 +819,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -916,8 +920,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1053,8 +1057,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1067,8 +1071,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1086,8 +1090,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1169,8 +1173,8 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1358,6 +1362,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
@@ -1403,8 +1409,8 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
-k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
-k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
+k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
+k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
@@ -1417,8 +1423,8 @@ k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
-k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
+k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
@@ -1427,8 +1433,8 @@ k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
-k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
-k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
+k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
+k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
@@ -1468,8 +1474,8 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/pkg/daemon/ceph/client/osd.go b/pkg/daemon/ceph/client/osd.go
index 63b9341c2e29..8351438825bf 100644
--- a/pkg/daemon/ceph/client/osd.go
+++ b/pkg/daemon/ceph/client/osd.go
@@ -18,6 +18,7 @@ package client
import (
"encoding/json"
"fmt"
+ "math"
"strconv"
"strings"
@@ -42,7 +43,7 @@ type OSDNodeUsage struct {
CrushWeight json.Number `json:"crush_weight"`
Depth json.Number `json:"depth"`
Reweight json.Number `json:"reweight"`
- KB json.Number `json:"kb"`
+ KB json.Number `json:"kb"` // KB is in KiB units
UsedKB json.Number `json:"kb_used"`
AvailKB json.Number `json:"kb_avail"`
Utilization json.Number `json:"utilization"`
@@ -220,6 +221,48 @@ func GetOSDUsage(context *clusterd.Context, clusterInfo *ClusterInfo) (*OSDUsage
return &osdUsage, nil
}
+func convertKibibytesToTebibytes(kib string) (float64, error) {
+ kibFloat, err := strconv.ParseFloat(kib, 64)
+ if err != nil {
+ return float64(0), errors.Wrap(err, "failed to convert string to float")
+ }
+ return kibFloat / float64(1024*1024*1024), nil
+}
+
+func ResizeOsdCrushWeight(actualOSD OSDNodeUsage, ctx *clusterd.Context, clusterInfo *ClusterInfo) (bool, error) {
+ currentCrushWeight, err := strconv.ParseFloat(actualOSD.CrushWeight.String(), 64)
+ if err != nil {
+ return false, errors.Wrapf(err, "failed converting string to float for osd.%d crush weight %q", actualOSD.ID, actualOSD.CrushWeight.String())
+ }
+ // actualOSD.KB is in KiB units
+ calculatedCrushWeight, err := convertKibibytesToTebibytes(actualOSD.KB.String())
+ if err != nil {
+ return false, errors.Wrapf(err, "failed to convert KiB to TiB for osd.%d crush weight %q", actualOSD.ID, actualOSD.KB.String())
+ }
+
+ // do not reweight if the calculated crush weight is 0, is less than or equal to currentCrushWeight, or if the percentage resize is less than 1 percent
+ if calculatedCrushWeight == float64(0) {
+ logger.Debugf("osd size is 0 for osd.%d, not resizing the crush weights", actualOSD.ID)
+ return false, nil
+ } else if calculatedCrushWeight <= currentCrushWeight {
+ logger.Debugf("calculatedCrushWeight %f is less then current currentCrushWeight %f for osd.%d, not resizing the crush weights", calculatedCrushWeight, currentCrushWeight, actualOSD.ID)
+ return false, nil
+ } else if math.Abs(((calculatedCrushWeight - currentCrushWeight) / currentCrushWeight)) <= 0.01 {
+ logger.Debugf("calculatedCrushWeight %f is less then 1 percent increased from currentCrushWeight %f for osd.%d, not resizing the crush weights", calculatedCrushWeight, currentCrushWeight, actualOSD.ID)
+ return false, nil
+ }
+
+ calculatedCrushWeightString := fmt.Sprintf("%f", calculatedCrushWeight)
+ logger.Infof("updating osd.%d crush weight to %q for cluster in namespace %q", actualOSD.ID, calculatedCrushWeightString, clusterInfo.Namespace)
+ args := []string{"osd", "crush", "reweight", fmt.Sprintf("osd.%d", actualOSD.ID), calculatedCrushWeightString}
+ buf, err := NewCephCommand(ctx, clusterInfo, args).Run()
+ if err != nil {
+ return false, errors.Wrapf(err, "failed to reweight osd.%d for cluster in namespace %q from actual crush weight %f to calculated crush weight %f: %s", actualOSD.ID, clusterInfo.Namespace, currentCrushWeight, calculatedCrushWeight, string(buf))
+ }
+
+ return true, nil
+}
+
func SetDeviceClass(context *clusterd.Context, clusterInfo *ClusterInfo, osdID int, deviceClass string) error {
// First remove the existing device class
args := []string{"osd", "crush", "rm-device-class", fmt.Sprintf("osd.%d", osdID)}
diff --git a/pkg/daemon/ceph/client/osd_test.go b/pkg/daemon/ceph/client/osd_test.go
index b6a2c77f0a13..3fa09d8a0adc 100644
--- a/pkg/daemon/ceph/client/osd_test.go
+++ b/pkg/daemon/ceph/client/osd_test.go
@@ -141,6 +141,18 @@ func TestOSDDeviceClasses(t *testing.T) {
})
}
+func TestConvertKibibytesToTebibytes(t *testing.T) {
+ kib := "1024"
+ terabyte, err := convertKibibytesToTebibytes(kib)
+ assert.NoError(t, err)
+ assert.Equal(t, float64(9.5367431640625e-07), terabyte)
+
+ kib = "1073741824"
+ terabyte, err = convertKibibytesToTebibytes(kib)
+ assert.NoError(t, err)
+ assert.Equal(t, float64(1), terabyte)
+}
+
func TestOSDOkToStop(t *testing.T) {
returnString := ""
returnOkResult := true
diff --git a/pkg/operator/ceph/cluster/controller.go b/pkg/operator/ceph/cluster/controller.go
index 4638f2babcd0..11eecc632472 100644
--- a/pkg/operator/ceph/cluster/controller.go
+++ b/pkg/operator/ceph/cluster/controller.go
@@ -21,8 +21,10 @@ import (
"context"
"fmt"
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
"github.com/coreos/pkg/capnslog"
- addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
+ addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/csiaddons/v1alpha1"
+
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
@@ -138,6 +140,11 @@ func add(opManagerContext context.Context, mgr manager.Manager, r reconcile.Reco
return err
}
+ err = csiopv1a1.AddToScheme(mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+
// Watch for changes on the CephCluster CR object
s := source.Kind[client.Object](
mgr.GetCache(), &cephv1.CephCluster{TypeMeta: ControllerTypeMeta},
diff --git a/pkg/operator/ceph/cluster/controller_test.go b/pkg/operator/ceph/cluster/controller_test.go
index 2ab9036111c2..86296cb0310e 100644
--- a/pkg/operator/ceph/cluster/controller_test.go
+++ b/pkg/operator/ceph/cluster/controller_test.go
@@ -21,7 +21,7 @@ import (
"testing"
"time"
- addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
+ addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/csiaddons/v1alpha1"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
"github.com/rook/rook/pkg/client/clientset/versioned/scheme"
diff --git a/pkg/operator/ceph/cluster/mgr/dashboard_test.go b/pkg/operator/ceph/cluster/mgr/dashboard_test.go
index 8a568758f59d..f82ada4d32d3 100644
--- a/pkg/operator/ceph/cluster/mgr/dashboard_test.go
+++ b/pkg/operator/ceph/cluster/mgr/dashboard_test.go
@@ -29,6 +29,7 @@ import (
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -158,7 +159,7 @@ func TestStartSecureDashboard(t *testing.T) {
svc, err = c.context.Clientset.CoreV1().Services(clusterInfo.Namespace).Get(ctx, "rook-ceph-mgr-dashboard", metav1.GetOptions{})
assert.NotNil(t, err)
assert.True(t, kerrors.IsNotFound(err))
- assert.Nil(t, svc)
+ assert.Equal(t, svc, &v1.Service{})
// Set the port to something over 1024 and confirm the port and targetPort are the same
c.spec.Dashboard.Enabled = true
diff --git a/pkg/operator/ceph/cluster/mon/mon.go b/pkg/operator/ceph/cluster/mon/mon.go
index 78de9534ead4..d80839d65c2c 100644
--- a/pkg/operator/ceph/cluster/mon/mon.go
+++ b/pkg/operator/ceph/cluster/mon/mon.go
@@ -1112,6 +1112,17 @@ func (c *Cluster) saveMonConfig() error {
return errors.Wrap(err, "failed to update csi cluster config")
}
+ if csi.EnableCSIOperator() && len(c.ClusterInfo.Monitors) > 0 {
+ err := csi.CreateUpdateCephConnection(c.context.Client, c.ClusterInfo, c.spec)
+ if err != nil {
+ return errors.Wrap(err, "failed to create/update cephConnection")
+ }
+ err = csi.CreateDefaultClientProfile(c.context.Client, c.ClusterInfo, c.ClusterInfo.NamespacedName())
+ if err != nil {
+ return errors.Wrap(err, "failed to create/update default client profile")
+ }
+ }
+
return nil
}
diff --git a/pkg/operator/ceph/cluster/osd/create_test.go b/pkg/operator/ceph/cluster/osd/create_test.go
index 6a5aea9de09d..7312d8b95ae7 100644
--- a/pkg/operator/ceph/cluster/osd/create_test.go
+++ b/pkg/operator/ceph/cluster/osd/create_test.go
@@ -353,7 +353,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) {
Name: "set1",
Count: 0,
VolumeClaimTemplates: []cephv1.VolumeClaimTemplate{
- newDummyPVC("data", namespace, "10Gi", "gp2"),
+ newDummyPVC("data", namespace, "10Gi", "gp2-csi"),
},
},
},
@@ -378,7 +378,7 @@ func Test_startProvisioningOverPVCs(t *testing.T) {
Name: "set1",
Count: 2,
VolumeClaimTemplates: []cephv1.VolumeClaimTemplate{
- newDummyPVC("data", namespace, "10Gi", "gp2"),
+ newDummyPVC("data", namespace, "10Gi", "gp2-csi"),
},
},
},
diff --git a/pkg/operator/ceph/cluster/osd/osd.go b/pkg/operator/ceph/cluster/osd/osd.go
index 24dbc52a1cd3..e9b4c0c6453d 100644
--- a/pkg/operator/ceph/cluster/osd/osd.go
+++ b/pkg/operator/ceph/cluster/osd/osd.go
@@ -326,13 +326,24 @@ func (c *Cluster) postReconcileUpdateOSDProperties(desiredOSDs map[int]*OSDInfo)
}
logger.Debugf("post processing osd properties with %d actual osds from ceph osd df and %d existing osds found during reconcile", len(osdUsage.OSDNodes), len(desiredOSDs))
for _, actualOSD := range osdUsage.OSDNodes {
- if desiredOSD, ok := desiredOSDs[actualOSD.ID]; ok {
- if err := c.updateDeviceClassIfChanged(actualOSD.ID, desiredOSD.DeviceClass, actualOSD.DeviceClass); err != nil {
+ if c.spec.Storage.AllowOsdCrushWeightUpdate {
+ _, err := cephclient.ResizeOsdCrushWeight(actualOSD, c.context, c.clusterInfo)
+ if err != nil {
// Log the error and allow other updates to continue
- logger.Error(err)
+ logger.Errorf("failed to resize osd crush weight on cluster in namespace %s: %v", c.clusterInfo.Namespace, err)
}
}
+
+ desiredOSD, ok := desiredOSDs[actualOSD.ID]
+ if !ok {
+ continue
+ }
+ if err := c.updateDeviceClassIfChanged(actualOSD.ID, desiredOSD.DeviceClass, actualOSD.DeviceClass); err != nil {
+ // Log the error and allow other updates to continue
+ logger.Errorf("failed to update device class on cluster in namespace %s: %v", c.clusterInfo.Namespace, err)
+ }
}
+
return nil
}
diff --git a/pkg/operator/ceph/cluster/osd/osd_test.go b/pkg/operator/ceph/cluster/osd/osd_test.go
index 8d38bcae7c0e..de2e62c270e7 100644
--- a/pkg/operator/ceph/cluster/osd/osd_test.go
+++ b/pkg/operator/ceph/cluster/osd/osd_test.go
@@ -51,11 +51,20 @@ import (
const (
healthyCephStatus = `{"fsid":"877a47e0-7f6c-435e-891a-76983ab8c509","health":{"checks":{},"status":"HEALTH_OK"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["a","b","c"],"monmap":{"epoch":3,"fsid":"877a47e0-7f6c-435e-891a-76983ab8c509","modified":"2020-11-02 09:58:23.015313","created":"2020-11-02 09:57:37.719235","min_mon_release":14,"min_mon_release_name":"nautilus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.74.42:3300","nonce":0},{"type":"v1","addr":"172.30.74.42:6789","nonce":0}]},"addr":"172.30.74.42:6789/0","public_addr":"172.30.74.42:6789/0"},{"rank":1,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.101.61:3300","nonce":0},{"type":"v1","addr":"172.30.101.61:6789","nonce":0}]},"addr":"172.30.101.61:6789/0","public_addr":"172.30.101.61:6789/0"},{"rank":2,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"172.30.250.55:3300","nonce":0},{"type":"v1","addr":"172.30.250.55:6789","nonce":0}]},"addr":"172.30.250.55:6789/0","public_addr":"172.30.250.55:6789/0"}]},"osdmap":{"osdmap":{"epoch":19,"num_osds":3,"num_up_osds":3,"num_in_osds":3,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"active+clean","count":96}],"num_pgs":96,"num_pools":3,"num_objects":79,"data_bytes":81553681,"bytes_used":3255447552,"bytes_avail":1646011994112,"bytes_total":1649267441664,"read_bytes_sec":853,"write_bytes_sec":5118,"read_op_per_sec":1,"write_op_per_sec":0},"fsmap":{"epoch":9,"id":1,"up":1,"in":1,"max":1,"by_rank":[{"filesystem_id":1,"rank":0,"name":"ocs-storagecluster-cephfilesystem-b","status":"up:active","gid":14161},{"filesystem_id":1,"rank":0,"name":"ocs-storagecluster-cephfilesystem-a","status":"up:standby-replay","gid":24146}],"up:standby":0},"mgrmap":{"epoch":10,"active_gid":14122,"active_name":"a","active_addrs":{"addrvec":[{"type":"v2","addr":"10.131.0.28:6800","nonce":1},{"type":"v1","addr":"10
.131.0.28:6801","nonce":1}]}}}`
unHealthyCephStatus = `{"fsid":"613975f3-3025-4802-9de1-a2280b950e75","health":{"checks":{"OSD_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 osds down"}},"OSD_HOST_DOWN":{"severity":"HEALTH_WARN","summary":{"message":"1 host (1 osds) down"}},"PG_AVAILABILITY":{"severity":"HEALTH_WARN","summary":{"message":"Reduced data availability: 101 pgs stale"}},"POOL_APP_NOT_ENABLED":{"severity":"HEALTH_WARN","summary":{"message":"application not enabled on 1 pool(s)"}}},"status":"HEALTH_WARN","overall_status":"HEALTH_WARN"},"election_epoch":12,"quorum":[0,1,2],"quorum_names":["rook-ceph-mon0","rook-ceph-mon2","rook-ceph-mon1"],"monmap":{"epoch":3,"fsid":"613975f3-3025-4802-9de1-a2280b950e75","modified":"2017-08-11 20:13:02.075679","created":"2017-08-11 20:12:35.314510","features":{"persistent":["kraken","luminous"],"optional":[]},"mons":[{"rank":0,"name":"rook-ceph-mon0","addr":"10.3.0.45:6789/0","public_addr":"10.3.0.45:6789/0"},{"rank":1,"name":"rook-ceph-mon2","addr":"10.3.0.249:6789/0","public_addr":"10.3.0.249:6789/0"},{"rank":2,"name":"rook-ceph-mon1","addr":"10.3.0.252:6789/0","public_addr":"10.3.0.252:6789/0"}]},"osdmap":{"osdmap":{"epoch":17,"num_osds":2,"num_up_osds":1,"num_in_osds":2,"full":false,"nearfull":true,"num_remapped_pgs":0}},"pgmap":{"pgs_by_state":[{"state_name":"stale+active+clean","count":101},{"state_name":"active+clean","count":99}],"num_pgs":200,"num_pools":2,"num_objects":243,"data_bytes":976793635,"bytes_used":13611479040,"bytes_avail":19825307648,"bytes_total":33436786688},"fsmap":{"epoch":1,"by_rank":[]},"mgrmap":{"epoch":3,"active_gid":14111,"active_name":"rook-ceph-mgr0","active_addr":"10.2.73.6:6800/9","available":true,"standbys":[],"modules":["restful","status"],"available_modules":["dashboard","prometheus","restful","status","zabbix"]},"servicemap":{"epoch":1,"modified":"0.000000","services":{}}}`
- osdDFResults = `
+ // osdDFResults is a JSON representation of the output of `ceph osd df` command
+ // which has 5 osds with different storage sizes
+ // Testing the resize of crush weight for OSDs based on the reported kb size
+ // 1) `ceph osd df`, kb size (in TiB) < crush_weight size -> no reweight
+ // 2) `ceph osd df`, kb size (in TiB) = 0 -> no reweight
+ // 3) `ceph osd df`, kb size (in TiB) and crush_weight size differ by roughly 0.85% (less than 1%) -> no reweight
+ // 4) & 5) `ceph osd df`, kb size (in TiB) and crush_weight size differ by more than 1% -> reweight
+ osdDFResults = `
{"nodes":[
{"id":0,"device_class":"hdd","name":"osd.0","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":41943040,"kb_used":27640,"kb_used_data":432,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915400,"utilization":0.065898895263671875,"var":0.99448308946989694,"pgs":9,"status":"up"},
- {"id":1,"device_class":"hdd","name":"osd.1","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":41943040,"kb_used":27960,"kb_used_data":752,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915080,"utilization":0.066661834716796875,"var":1.005996641880547,"pgs":15,"status":"up"},
- {"id":2,"device_class":"hdd","name":"osd.2","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":41943040,"kb_used":27780,"kb_used_data":564,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915260,"utilization":0.066232681274414062,"var":0.99952026864955634,"pgs":8,"status":"up"}],
+ {"id":1,"device_class":"hdd","name":"osd.1","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":0,"kb_used":27960,"kb_used_data":752,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915080,"utilization":0.066661834716796875,"var":1.005996641880547,"pgs":15,"status":"up"},
+ {"id":2,"device_class":"hdd","name":"osd.1","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":42333872,"kb_used":27960,"kb_used_data":752,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915080,"utilization":0.066661834716796875,"var":1.005996641880547,"pgs":15,"status":"up"},
+ {"id":3,"device_class":"hdd","name":"osd.1","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":9841943040,"kb_used":27960,"kb_used_data":752,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915080,"utilization":0.066661834716796875,"var":1.005996641880547,"pgs":15,"status":"up"},
+ {"id":4,"device_class":"hdd","name":"osd.2","type":"osd","type_id":0,"crush_weight":0.039093017578125,"depth":2,"pool_weights":{},"reweight":1,"kb":9991943040,"kb_used":27780,"kb_used_data":564,"kb_used_omap":1,"kb_used_meta":27198,"kb_avail":41915260,"utilization":0.066232681274414062,"var":0.99952026864955634,"pgs":8,"status":"up"}],
"stray":[],"summary":{"total_kb":125829120,"total_kb_used":83380,"total_kb_used_data":1748,"total_kb_used_omap":3,"total_kb_used_meta":81596,"total_kb_avail":125745740,"average_utilization":0.066264470418294266,"min_var":0.99448308946989694,"max_var":1.005996641880547,"dev":0.00031227879054369131}}`
)
@@ -370,12 +379,14 @@ func TestAddRemoveNode(t *testing.T) {
assert.True(t, k8serrors.IsNotFound(err))
}
-func TestUpdateDeviceClass(t *testing.T) {
+func TestPostReconcileUpdateOSDProperties(t *testing.T) {
namespace := "ns"
clientset := fake.NewSimpleClientset()
removedDeviceClassOSD := ""
setDeviceClassOSD := ""
setDeviceClass := ""
+ var crushWeight []string
+ var osdID []string
executor := &exectest.MockExecutor{
MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
logger.Infof("ExecuteCommandWithOutput: %s %v", command, args)
@@ -389,6 +400,9 @@ func TestUpdateDeviceClass(t *testing.T) {
} else if args[2] == "set-device-class" {
setDeviceClass = args[3]
setDeviceClassOSD = args[4]
+ } else if args[2] == "reweight" {
+ osdID = append(osdID, args[3])
+ crushWeight = append(crushWeight, args[4])
}
}
}
@@ -401,7 +415,6 @@ func TestUpdateDeviceClass(t *testing.T) {
Name: "testing",
Namespace: namespace,
},
- Spec: cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{AllowDeviceClassUpdate: true}},
}
// Objects to track in the fake client.
object := []runtime.Object{
@@ -425,11 +438,22 @@ func TestUpdateDeviceClass(t *testing.T) {
1: {ID: 1, DeviceClass: "hdd"},
2: {ID: 2, DeviceClass: "newclass"},
}
- err := c.postReconcileUpdateOSDProperties(desiredOSDs)
- assert.Nil(t, err)
- assert.Equal(t, "newclass", setDeviceClass)
- assert.Equal(t, "osd.2", setDeviceClassOSD)
- assert.Equal(t, "osd.2", removedDeviceClassOSD)
+ t.Run("test device class change", func(t *testing.T) {
+ c.spec.Storage = cephv1.StorageScopeSpec{AllowDeviceClassUpdate: true}
+ err := c.postReconcileUpdateOSDProperties(desiredOSDs)
+ assert.Nil(t, err)
+ assert.Equal(t, "newclass", setDeviceClass)
+ assert.Equal(t, "osd.2", setDeviceClassOSD)
+ assert.Equal(t, "osd.2", removedDeviceClassOSD)
+ })
+ t.Run("test resize Osd Crush Weight", func(t *testing.T) {
+ c.spec.Storage = cephv1.StorageScopeSpec{AllowOsdCrushWeightUpdate: true}
+ err := c.postReconcileUpdateOSDProperties(desiredOSDs)
+ assert.Nil(t, err)
+ // only osds whose size-derived weight (kb in TiB) exceeds the current crush weight by more than 1% should be reweighted
+ assert.Equal(t, []string([]string{"osd.3", "osd.4"}), osdID)
+ assert.Equal(t, []string([]string{"9.166024", "9.305722"}), crushWeight)
+ })
}
func TestAddNodeFailure(t *testing.T) {
diff --git a/pkg/operator/ceph/cluster/watcher.go b/pkg/operator/ceph/cluster/watcher.go
index f57021dde638..f8f767091338 100644
--- a/pkg/operator/ceph/cluster/watcher.go
+++ b/pkg/operator/ceph/cluster/watcher.go
@@ -25,7 +25,7 @@ import (
"strings"
"time"
- addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
+ addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/csiaddons/v1alpha1"
pkgerror "github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
diff --git a/pkg/operator/ceph/cluster/watcher_test.go b/pkg/operator/ceph/cluster/watcher_test.go
index f4b83ca076a7..e02de9869488 100644
--- a/pkg/operator/ceph/cluster/watcher_test.go
+++ b/pkg/operator/ceph/cluster/watcher_test.go
@@ -24,7 +24,7 @@ import (
"testing"
"github.com/coreos/pkg/capnslog"
- addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
+ addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/csiaddons/v1alpha1"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/client/clientset/versioned/scheme"
diff --git a/pkg/operator/ceph/controller/network.go b/pkg/operator/ceph/controller/network.go
index 4c64d37ab03f..b97cb454af1f 100644
--- a/pkg/operator/ceph/controller/network.go
+++ b/pkg/operator/ceph/controller/network.go
@@ -264,6 +264,8 @@ func discoverAddressRanges(
job.Spec.Template.Annotations = map[string]string{
nadv1.NetworkAttachmentAnnot: netSelectionValue,
}
+ cephv1.GetCmdReporterAnnotations(clusterSpec.Annotations).ApplyToObjectMeta(&job.Spec.Template.ObjectMeta)
+ cephv1.GetCmdReporterLabels(clusterSpec.Labels).ApplyToObjectMeta(&job.Spec.Template.ObjectMeta)
// use osd placement for net canaries b/c osd pods are present on both public and cluster nets
cephv1.GetOSDPlacement(clusterSpec.Placement).ApplyToPodSpec(&job.Spec.Template.Spec)
diff --git a/pkg/operator/ceph/controller/version.go b/pkg/operator/ceph/controller/version.go
index 1585cef77695..915672fda7d2 100644
--- a/pkg/operator/ceph/controller/version.go
+++ b/pkg/operator/ceph/controller/version.go
@@ -86,6 +86,9 @@ func DetectCephVersion(ctx context.Context, rookImage, namespace, jobName string
cephv1.GetMonPlacement(cephClusterSpec.Placement).ApplyToPodSpec(&job.Spec.Template.Spec)
job.Spec.Template.Spec.Affinity.PodAntiAffinity = nil
+ cephv1.GetCmdReporterAnnotations(cephClusterSpec.Annotations).ApplyToObjectMeta(&job.Spec.Template.ObjectMeta)
+ cephv1.GetCmdReporterLabels(cephClusterSpec.Labels).ApplyToObjectMeta(&job.Spec.Template.ObjectMeta)
+
stdout, stderr, retcode, err := versionReporter.Run(ctx, detectCephVersionTimeout)
if err != nil {
return nil, errors.Wrap(err, "failed to complete ceph version job")
diff --git a/pkg/operator/ceph/cr_manager.go b/pkg/operator/ceph/cr_manager.go
index 0652b16630ff..b3398d681a18 100644
--- a/pkg/operator/ceph/cr_manager.go
+++ b/pkg/operator/ceph/cr_manager.go
@@ -44,12 +44,14 @@ import (
"github.com/rook/rook/pkg/operator/ceph/object/zonegroup"
"github.com/rook/rook/pkg/operator/ceph/pool"
"github.com/rook/rook/pkg/operator/ceph/pool/radosnamespace"
+ "github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/apimachinery/pkg/runtime"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)
@@ -141,15 +143,23 @@ func (o *Operator) startCRDManager(context context.Context, mgrErrorCh chan erro
}
}
+ metricsBindAddress, err := k8sutil.GetOperatorSetting(context, o.context.Clientset, opcontroller.OperatorSettingConfigMapName, "ROOK_OPERATOR_METRICS_BIND_ADDRESS", "0")
+ if err != nil {
+ mgrErrorCh <- errors.Wrap(err, "failed to get configmap value `ROOK_OPERATOR_METRICS_BIND_ADDRESS`.")
+ return
+ }
+ skipNameValidation := true
// Set up a manager
mgrOpts := manager.Options{
LeaderElection: false,
Metrics: metricsserver.Options{
- // BindAddress is the bind address for controller runtime metrics server default is 8080. Since we don't use the
- // controller runtime metrics server, we need to set the bind address 0 so that port 8080 is available.
- BindAddress: "0",
+ // BindAddress is the bind address for the controller-runtime metrics server. Defaults to "0", which disables it.
+ BindAddress: metricsBindAddress,
},
Scheme: scheme,
+ Controller: config.Controller{
+ SkipNameValidation: &skipNameValidation,
+ },
}
if o.config.NamespaceToWatch != "" {
diff --git a/pkg/operator/ceph/csi/ceph_connection.go b/pkg/operator/ceph/csi/ceph_connection.go
new file mode 100644
index 000000000000..bedb6664bf9b
--- /dev/null
+++ b/pkg/operator/ceph/csi/ceph_connection.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "github.com/pkg/errors"
+
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func CreateUpdateCephConnection(c client.Client, clusterInfo *cephclient.ClusterInfo, clusterSpec cephv1.ClusterSpec) error {
+
+ logger.Infof("Configuring ceph connection CR %q in namespace %q", clusterInfo.NamespacedName().Name, clusterInfo.NamespacedName().Namespace)
+ csiCephConnection := &csiopv1a1.CephConnection{}
+
+ csiCephConnection.Name = clusterInfo.NamespacedName().Name
+ csiCephConnection.Namespace = clusterInfo.NamespacedName().Namespace
+
+ spec, err := generateCephConnSpec(c, clusterInfo, csiCephConnection.Spec, clusterSpec)
+ if err != nil {
+ return errors.Wrapf(err, "failed to set ceph connection CR %q in namespace %q", csiCephConnection.Name, clusterInfo.Namespace)
+ }
+
+ err = clusterInfo.OwnerInfo.SetOwnerReference(csiCephConnection)
+ if err != nil {
+ return errors.Wrapf(err, "failed to set owner reference for ceph connection CR %q", csiCephConnection.Name)
+ }
+
+ err = c.Get(clusterInfo.Context, types.NamespacedName{Name: csiCephConnection.Name, Namespace: csiCephConnection.Namespace}, csiCephConnection)
+ if err != nil {
+ if kerrors.IsNotFound(err) {
+ csiCephConnection.Spec = spec
+ err = c.Create(clusterInfo.Context, csiCephConnection)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph connection CR %q", csiCephConnection.Name)
+ }
+
+ logger.Infof("Successfully created ceph connection CR %q", csiCephConnection.Name)
+ return nil
+ }
+ return errors.Wrap(err, "failed to get ceph connection CR")
+ }
+
+ csiCephConnection.Spec = spec
+ err = c.Update(clusterInfo.Context, csiCephConnection)
+ if err != nil {
+ return errors.Wrapf(err, "failed to update ceph connection CR %q", csiCephConnection.Name)
+ }
+
+ logger.Infof("Successfully updated ceph connection CR %q", csiCephConnection.Name)
+ return nil
+}
+
+func generateCephConnSpec(c client.Client, clusterInfo *cephclient.ClusterInfo, csiClusterConnSpec csiopv1a1.CephConnectionSpec, clusterSpec cephv1.ClusterSpec) (csiopv1a1.CephConnectionSpec, error) {
+ if clusterSpec.CSI.ReadAffinity.Enabled {
+ csiClusterConnSpec = csiopv1a1.CephConnectionSpec{
+ ReadAffinity: &csiopv1a1.ReadAffinitySpec{
+ CrushLocationLabels: clusterSpec.CSI.ReadAffinity.CrushLocationLabels,
+ },
+ }
+ }
+
+ cephRBDMirrorList := &cephv1.CephRBDMirrorList{}
+ err := c.List(clusterInfo.Context, cephRBDMirrorList, &client.ListOptions{Namespace: clusterInfo.Namespace})
+ if err != nil {
+ return csiClusterConnSpec, errors.Wrapf(err, "failed to list CephRBDMirror resource")
+ }
+
+ if len(cephRBDMirrorList.Items) == 0 {
+ logger.Debug("no ceph CephRBDMirror found")
+ } else {
+ // Currently, only a single RBD mirror is supported
+ csiClusterConnSpec.RbdMirrorDaemonCount = cephRBDMirrorList.Items[0].Spec.Count
+ }
+
+ for _, mon := range clusterInfo.Monitors {
+ csiClusterConnSpec.Monitors = append(csiClusterConnSpec.Monitors, mon.Endpoint)
+ }
+
+ return csiClusterConnSpec, nil
+}
diff --git a/pkg/operator/ceph/csi/ceph_connection_test.go b/pkg/operator/ceph/csi/ceph_connection_test.go
new file mode 100644
index 000000000000..c2f6f4396703
--- /dev/null
+++ b/pkg/operator/ceph/csi/ceph_connection_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "context"
+ "testing"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ "github.com/rook/rook/pkg/client/clientset/versioned/scheme"
+ clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test"
+ "github.com/stretchr/testify/assert"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestCreateUpdateCephConnection(t *testing.T) {
+ c := clienttest.CreateTestClusterInfo(3)
+ ns := "test"
+ c.Namespace = ns
+ c.SetName("testcluster")
+ c.NamespacedName()
+
+ cluster := &cephv1.CephCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "testCluster",
+ Namespace: ns,
+ },
+ Spec: cephv1.ClusterSpec{
+ CSI: cephv1.CSIDriverSpec{
+ ReadAffinity: cephv1.ReadAffinitySpec{
+ Enabled: true,
+ CrushLocationLabels: []string{"kubernetes.io/hostname"},
+ },
+ },
+ },
+ }
+ csiCephConnection := &csiopv1a1.CephConnection{}
+
+ // Register operator types with the runtime scheme.
+ s := scheme.Scheme
+ s.AddKnownTypes(cephv1.SchemeGroupVersion, csiCephConnection, &cephv1.CephCluster{}, &cephv1.CephRBDMirrorList{})
+ object := []runtime.Object{
+ cluster,
+ }
+
+ // Create a fake client to mock API calls.
+ cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+ err := CreateUpdateCephConnection(cl, c, cluster.Spec)
+ assert.NoError(t, err)
+
+ // When no RBDMirror is created
+ err = cl.Get(context.TODO(), types.NamespacedName{Name: c.NamespacedName().Name, Namespace: c.NamespacedName().Namespace}, csiCephConnection)
+ assert.NoError(t, err)
+ assert.Equal(t, csiCephConnection.Spec.RbdMirrorDaemonCount, 0)
+
+ rbdMirror := &cephv1.CephRBDMirror{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-mirror",
+ Namespace: ns,
+ },
+ Spec: cephv1.RBDMirroringSpec{
+ Count: 1,
+ },
+ }
+
+ object = []runtime.Object{
+ rbdMirror,
+ cluster,
+ }
+
+ err = cl.Create(context.TODO(), rbdMirror)
+ assert.NoError(t, err)
+ // Create a fake client to mock API calls.
+ cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+ err = CreateUpdateCephConnection(cl, c, cluster.Spec)
+ assert.NoError(t, err)
+
+ // When RBDMirror is created
+ err = cl.Get(context.TODO(), types.NamespacedName{Name: c.NamespacedName().Name, Namespace: c.NamespacedName().Namespace}, csiCephConnection)
+ assert.NoError(t, err)
+ assert.Equal(t, 1, csiCephConnection.Spec.RbdMirrorDaemonCount)
+}
diff --git a/pkg/operator/ceph/csi/config.go b/pkg/operator/ceph/csi/config.go
new file mode 100644
index 000000000000..482c71545ab9
--- /dev/null
+++ b/pkg/operator/ceph/csi/config.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "context"
+ "strings"
+
+ cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ "github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func CreateUpdateClientProfileRadosNamespace(ctx context.Context, c client.Client, clusterInfo *cephclient.ClusterInfo, cephBlockPoolRadosNamespacedName types.NamespacedName, clusterID, clusterName string) error {
+
+ logger.Info("creating ceph-csi clientProfile CR for rados namespace")
+
+ csiOpClientProfile := &csiopv1a1.ClientProfile{}
+ csiOpClientProfile.Name = clusterID
+ csiOpClientProfile.Namespace = cephBlockPoolRadosNamespacedName.Namespace
+ csiOpClientProfile.Spec = csiopv1a1.ClientProfileSpec{
+ CephConnectionRef: v1.LocalObjectReference{
+ Name: clusterName,
+ },
+ Rbd: &csiopv1a1.RbdConfigSpec{
+ RadosNamespace: cephBlockPoolRadosNamespacedName.Name,
+ },
+ }
+
+ err := clusterInfo.OwnerInfo.SetOwnerReference(csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to set owner reference for clientProfile CR %q for radosNamespace", csiOpClientProfile.Name)
+ }
+
+ err = c.Get(ctx, types.NamespacedName{Name: csiOpClientProfile.Name, Namespace: csiOpClientProfile.Namespace}, csiOpClientProfile)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ err = c.Create(ctx, csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-csi clientProfile cr for RBD %q", csiOpClientProfile.Name)
+ }
+ logger.Infof("successfully created ceph-csi clientProfile CR for RBD %q", csiOpClientProfile.Name)
+ return nil
+ }
+ return err
+ }
+
+ err = c.Update(ctx, csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-csi clientProfile cr for RBD %q", csiOpClientProfile.Name)
+ }
+ logger.Infof("successfully updated ceph-csi clientProfile CR for RBD %q", csiOpClientProfile.Name)
+
+ return nil
+}
+
+func CreateUpdateClientProfileSubVolumeGroup(ctx context.Context, c client.Client, clusterInfo *cephclient.ClusterInfo, cephFilesystemNamespacedName types.NamespacedName, clusterID, clusterName string) error {
+
+ logger.Info("Creating ceph-csi clientProfile CR for subvolume group")
+
+ csiOpClientProfile := generateProfileSubVolumeGroupSpec(clusterInfo, cephFilesystemNamespacedName, clusterID, clusterName)
+
+ err := clusterInfo.OwnerInfo.SetOwnerReference(csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to set owner reference for clientProfile CR %q for subVolGrp", csiOpClientProfile.Name)
+ }
+
+ err = c.Get(ctx, types.NamespacedName{Name: csiOpClientProfile.Name, Namespace: csiOpClientProfile.Namespace}, csiOpClientProfile)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ err = c.Create(ctx, csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-csi clientProfile cr for subVolGrp %q", csiOpClientProfile.Name)
+ }
+ logger.Infof("successfully created ceph-csi clientProfile CR for subVolGrp %q", csiOpClientProfile.Name)
+ return nil
+ }
+ return err
+ }
+
+ err = c.Update(ctx, csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-csi clientProfile cr for subVolGrp %q", csiOpClientProfile.Name)
+ }
+ logger.Infof("successfully updated ceph-csi clientProfile CR for subVolGrp %q", csiOpClientProfile.Name)
+
+ return nil
+}
+
+func generateProfileSubVolumeGroupSpec(clusterInfo *cephclient.ClusterInfo, cephFilesystemNamespacedName types.NamespacedName, clusterID, clusterName string) *csiopv1a1.ClientProfile {
+ csiOpClientProfile := &csiopv1a1.ClientProfile{}
+ csiOpClientProfile.Name = clusterID
+ csiOpClientProfile.Namespace = cephFilesystemNamespacedName.Namespace
+ csiOpClientProfile.Spec = csiopv1a1.ClientProfileSpec{
+ CephConnectionRef: v1.LocalObjectReference{
+ Name: clusterName,
+ },
+ CephFs: &csiopv1a1.CephFsConfigSpec{
+ SubVolumeGroup: cephFilesystemNamespacedName.Name,
+ },
+ }
+
+ kernelMountKeyVal := strings.Split(clusterInfo.CSIDriverSpec.CephFS.KernelMountOptions, "=")
+ fuseMountKeyVal := strings.Split(clusterInfo.CSIDriverSpec.CephFS.FuseMountOptions, "=")
+
+ if len(kernelMountKeyVal) == 2 {
+ csiOpClientProfile.Spec.CephFs.KernelMountOptions = map[string]string{kernelMountKeyVal[0]: kernelMountKeyVal[1]}
+ }
+
+ if len(fuseMountKeyVal) == 2 {
+ csiOpClientProfile.Spec.CephFs.FuseMountOptions = map[string]string{fuseMountKeyVal[0]: fuseMountKeyVal[1]}
+ }
+
+ return csiOpClientProfile
+}
+
+// CreateDefaultClientProfile creates a default client profile for the csi-operator to connect to the driver
+func CreateDefaultClientProfile(c client.Client, clusterInfo *cephclient.ClusterInfo, namespaced types.NamespacedName) error {
+ logger.Info("Creating ceph-csi clientProfile default CR")
+
+ csiOpClientProfile := &csiopv1a1.ClientProfile{}
+ csiOpClientProfile.Name = clusterInfo.Namespace
+ csiOpClientProfile.Namespace = clusterInfo.Namespace
+ csiOpClientProfile.Spec = csiopv1a1.ClientProfileSpec{
+ CephConnectionRef: v1.LocalObjectReference{
+ Name: namespaced.Name,
+ },
+ }
+
+ err := clusterInfo.OwnerInfo.SetOwnerReference(csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to set owner reference for default clientProfile CR %q", csiOpClientProfile.Name)
+ }
+
+ err = c.Get(clusterInfo.Context, types.NamespacedName{Name: csiOpClientProfile.Name, Namespace: csiOpClientProfile.Namespace}, csiOpClientProfile)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ err = c.Create(clusterInfo.Context, csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-csi for default clientProfile CR %q", csiOpClientProfile.Name)
+ }
+ logger.Infof("successfully created ceph-csi for default clientProfile CR %q", csiOpClientProfile.Name)
+ return nil
+ }
+ return err
+ }
+
+ err = c.Update(clusterInfo.Context, csiOpClientProfile)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-csi for default clientProfile CR %q", csiOpClientProfile.Name)
+ }
+ logger.Infof("successfully updated ceph-csi for default clientProfile CR %q", csiOpClientProfile.Name)
+
+ return nil
+}
diff --git a/pkg/operator/ceph/csi/config_test.go b/pkg/operator/ceph/csi/config_test.go
new file mode 100644
index 000000000000..089d0ae5a2ce
--- /dev/null
+++ b/pkg/operator/ceph/csi/config_test.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ "github.com/rook/rook/pkg/client/clientset/versioned/scheme"
+ clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test"
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestCreateUpdateClientProfile(t *testing.T) {
+ c := clienttest.CreateTestClusterInfo(3)
+ c.CSIDriverSpec = cephv1.CSIDriverSpec{
+ CephFS: cephv1.CSICephFSSpec{
+ KernelMountOptions: "ms_mode=crc",
+ },
+ }
+
+ kernelMountKeyVal := strings.Split(c.CSIDriverSpec.CephFS.KernelMountOptions, "=")
+ assert.Equal(t, len(kernelMountKeyVal), 2)
+ assert.Equal(t, kernelMountKeyVal[0], "ms_mode")
+ assert.Equal(t, kernelMountKeyVal[1], "crc")
+
+ ns := "test"
+ c.Namespace = ns
+ c.SetName("testcluster")
+ c.NamespacedName()
+ clusterName := "testClusterName"
+ cephBlockPoolRadosNamespacedName := types.NamespacedName{Namespace: ns, Name: "cephBlockPoolRadosNames"}
+ cephSubVolGrpNamespacedName := types.NamespacedName{Namespace: ns, Name: "cephSubVolumeGroupNames"}
+ csiOpClientProfile := &csiopv1a1.ClientProfile{}
+
+ // Register operator types with the runtime scheme.
+ s := scheme.Scheme
+ s.AddKnownTypes(cephv1.SchemeGroupVersion, csiOpClientProfile)
+ object := []runtime.Object{
+ csiOpClientProfile,
+ }
+
+ // Create a fake client to mock API calls.
+ cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+ err := CreateUpdateClientProfileRadosNamespace(context.TODO(), cl, c, cephBlockPoolRadosNamespacedName, cephBlockPoolRadosNamespacedName.Name, clusterName)
+ assert.NoError(t, err)
+
+ err = CreateUpdateClientProfileSubVolumeGroup(context.TODO(), cl, c, cephSubVolGrpNamespacedName, cephSubVolGrpNamespacedName.Name, clusterName)
+ assert.NoError(t, err)
+
+ err = cl.Get(context.TODO(), cephBlockPoolRadosNamespacedName, csiOpClientProfile)
+ assert.NoError(t, err)
+ assert.Equal(t, csiOpClientProfile.Spec.Rbd.RadosNamespace, cephBlockPoolRadosNamespacedName.Name)
+
+ err = cl.Get(context.TODO(), cephSubVolGrpNamespacedName, csiOpClientProfile)
+ assert.NoError(t, err)
+ assert.Equal(t, csiOpClientProfile.Spec.CephFs.SubVolumeGroup, cephSubVolGrpNamespacedName.Name)
+ assert.Equal(t, csiOpClientProfile.Spec.CephFs.KernelMountOptions["ms_mode"], kernelMountKeyVal[1])
+}
diff --git a/pkg/operator/ceph/csi/controller.go b/pkg/operator/ceph/csi/controller.go
index 86473acff5cd..f5341a48325f 100644
--- a/pkg/operator/ceph/csi/controller.go
+++ b/pkg/operator/ceph/csi/controller.go
@@ -21,6 +21,7 @@ import (
"os"
"strconv"
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -30,7 +31,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
- addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/apis/csiaddons/v1alpha1"
+ addonsv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/csiaddons/v1alpha1"
"github.com/pkg/errors"
"github.com/rook/rook/pkg/clusterd"
@@ -42,6 +43,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/version"
)
const (
@@ -56,6 +58,8 @@ type ReconcileCSI struct {
opManagerContext context.Context
opConfig opcontroller.OperatorConfig
clustersWithHolder []ClusterDetail
+ // the first cluster CR which will determine some settings for the csi driver
+ firstCephCluster *cephv1.ClusterSpec
}
// ClusterDetail is a struct that holds the information of a cluster, it knows its internals (like
@@ -119,6 +123,11 @@ func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, opCon
return err
}
+ err = csiopv1a1.AddToScheme(mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+
return nil
}
@@ -141,6 +150,7 @@ var reconcileSaveCSIDriverOptions = SaveCSIDriverOptions
func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, error) {
// reconcileResult is used to communicate the result of the reconciliation back to the caller
var reconcileResult reconcile.Result
+ var clusterNamespace string
ownerRef, err := k8sutil.GetDeploymentOwnerReference(r.opManagerContext, r.context.Clientset, os.Getenv(k8sutil.PodNameEnvVar), r.opConfig.OperatorNamespace)
if err != nil {
@@ -178,18 +188,22 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
r.opConfig.Parameters = opConfig.Data
}
+ serverVersion, err := r.context.Clientset.Discovery().ServerVersion()
+ if err != nil {
+ return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to get server version")
+ }
+
+ enableCSIOperator, err = strconv.ParseBool(k8sutil.GetValue(r.opConfig.Parameters, "ROOK_USE_CSI_OPERATOR", "false"))
+ if err != nil {
+ return reconcileResult, errors.Wrap(err, "unable to parse value for 'ROOK_USE_CSI_OPERATOR'")
+ }
+
// do not recocnile if csi driver is disabled
disableCSI, err := strconv.ParseBool(k8sutil.GetValue(r.opConfig.Parameters, "ROOK_CSI_DISABLE_DRIVER", "false"))
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "unable to parse value for 'ROOK_CSI_DISABLE_DRIVER")
} else if disableCSI {
logger.Info("ceph csi driver is disabled")
- return reconcile.Result{}, nil
- }
-
- serverVersion, err := r.context.Clientset.Discovery().ServerVersion()
- if err != nil {
- return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to get server version")
}
// See if there is a CephCluster
@@ -264,6 +278,10 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
return reconcile.Result{}, nil
}
+ if r.firstCephCluster == nil {
+ r.firstCephCluster = &cephClusters.Items[i].Spec
+ }
+
// Load cluster info for later use in updating the ceph-csi configmap
clusterInfo, _, _, err := opcontroller.LoadClusterInfo(r.context, r.opManagerContext, cluster.Namespace, &cephClusters.Items[i].Spec)
if err != nil {
@@ -277,6 +295,7 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to load cluster info for cluster %q", cluster.Name)
}
clusterInfo.OwnerInfo = k8sutil.NewOwnerInfo(&cephClusters.Items[i], r.scheme)
+ clusterNamespace = clusterInfo.Namespace
// is holder enabled for this cluster?
thisHolderEnabled := (!csiHostNetworkEnabled || cluster.Spec.Network.IsMultus()) && !csiDisableHolders
@@ -299,16 +318,57 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e
if err != nil {
return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to update CSI driver options for cluster %q", cluster.Name)
}
+
+ // Skip creating the new CSI-operator resources while the holder pod is enabled, until multus support is added to the CSI operator
+ if EnableCSIOperator() {
+ logger.Info("disabling csi-driver since EnableCSIOperator is true")
+ err := r.stopDrivers(serverVersion)
+ if err != nil {
+ return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to stop csi Drivers")
+ }
+ err = r.reconcileOperatorConfig(cluster, clusterInfo, serverVersion)
+ if err != nil {
+ return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to reconcile csi-op config CR")
+ }
+ return reconcileResult, nil
+ } else {
+ r.deleteCSIOperatorResources(clusterNamespace, false)
+ }
}
- err = r.validateAndConfigureDrivers(serverVersion, ownerInfo)
- if err != nil {
- return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to configure ceph csi")
+ if !EnableCSIOperator() {
+ r.deleteCSIOperatorResources(clusterNamespace, true)
+
+ err = r.validateAndConfigureDrivers(serverVersion, ownerInfo)
+ if err != nil {
+ return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to configure ceph csi")
+ }
}
return reconcileResult, nil
}
+func (r *ReconcileCSI) reconcileOperatorConfig(cluster cephv1.CephCluster, clusterInfo *cephclient.ClusterInfo, serverVersion *version.Info) error {
+ if err := r.setParams(serverVersion); err != nil {
+ return errors.Wrapf(err, "failed to configure CSI parameters")
+ }
+
+ if err := validateCSIParam(); err != nil {
+ return errors.Wrapf(err, "failed to validate CSI parameters")
+ }
+
+ err := r.createOrUpdateOperatorConfig(cluster)
+ if err != nil {
+ return errors.Wrap(err, "failed to configure csi operator operator config cr")
+ }
+
+ err = r.createOrUpdateDriverResources(cluster, clusterInfo)
+ if err != nil {
+ return errors.Wrap(err, "failed to configure ceph-CSI operator drivers cr")
+ }
+ return nil
+}
+
func (r *ReconcileCSI) setCSILogrotateParams(cephClustersItems []cephv1.CephCluster) {
logger.Debug("set logrotate values in csi param")
spec := cephClustersItems[0].Spec
diff --git a/pkg/operator/ceph/csi/operator_config.go b/pkg/operator/ceph/csi/operator_config.go
new file mode 100644
index 000000000000..82f27c19a9ba
--- /dev/null
+++ b/pkg/operator/ceph/csi/operator_config.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "reflect"
+
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ "github.com/pkg/errors"
+ v1 "k8s.io/api/core/v1"
+ k8scsiv1 "k8s.io/api/storage/v1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const (
+ opConfigCRName = "ceph-csi-operator-config"
+ imageSetConfigMap = "rook-csi-operator-image-set-configmap"
+)
+
+func (r *ReconcileCSI) createOrUpdateOperatorConfig(cluster cephv1.CephCluster) error {
+ logger.Info("Creating ceph-CSI operator config CR")
+
+ opConfig := &csiopv1a1.OperatorConfig{}
+ opConfig.Name = opConfigCRName
+ opConfig.Namespace = r.opConfig.OperatorNamespace
+
+ imageSetCmName, err := r.createImageSetConfigmap()
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-CSI operator config ImageSetConfigmap for CR %s", opConfigCRName)
+ }
+
+ spec := r.generateCSIOpConfigSpec(cluster, opConfig, imageSetCmName)
+
+ err = r.client.Get(r.opManagerContext, types.NamespacedName{Name: opConfigCRName, Namespace: r.opConfig.OperatorNamespace}, opConfig)
+ if err != nil {
+ if kerrors.IsNotFound(err) {
+ opConfig.Spec = spec
+ err = r.client.Create(r.opManagerContext, opConfig)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create ceph-CSI operator operator config CR %q", opConfig.Name)
+ }
+
+ logger.Infof("Successfully created ceph-CSI operator config CR %q", opConfig.Name)
+ return nil
+ }
+ return errors.Wrapf(err, "failed to get ceph-CSI operator operator config CR %q", opConfigCRName)
+ }
+
+ opConfig.Spec = spec
+ err = r.client.Update(r.opManagerContext, opConfig)
+ if err != nil {
+ return errors.Wrapf(err, "failed to update ceph-CSI operator operator config CR %q", opConfig.Name)
+ }
+ logger.Infof("Successfully updated ceph-CSI operator config CR %q", opConfig.Name)
+
+ return nil
+}
+
+func (r *ReconcileCSI) generateCSIOpConfigSpec(cluster cephv1.CephCluster, opConfig *csiopv1a1.OperatorConfig, imageSetCmName string) csiopv1a1.OperatorConfigSpec {
+ cephfsClientType := csiopv1a1.KernelCephFsClient
+ if CSIParam.ForceCephFSKernelClient == "false" {
+ cephfsClientType = csiopv1a1.AutoDetectCephFsClient
+ }
+
+ opConfig.Spec = csiopv1a1.OperatorConfigSpec{
+ DriverSpecDefaults: &csiopv1a1.DriverSpec{
+ Log: &csiopv1a1.LogSpec{
+ Verbosity: int(CSIParam.LogLevel),
+ },
+ ImageSet: &v1.LocalObjectReference{
+ Name: imageSetCmName,
+ },
+ ClusterName: &cluster.Name,
+ EnableMetadata: &CSIParam.CSIEnableMetadata,
+ GenerateOMapInfo: &CSIParam.EnableOMAPGenerator,
+ FsGroupPolicy: k8scsiv1.FileFSGroupPolicy,
+ NodePlugin: &csiopv1a1.NodePluginSpec{
+ PodCommonSpec: csiopv1a1.PodCommonSpec{
+ PrioritylClassName: &CSIParam.ProvisionerPriorityClassName,
+ Affinity: &v1.Affinity{
+ NodeAffinity: getNodeAffinity(r.opConfig.Parameters, pluginNodeAffinityEnv, &v1.NodeAffinity{}),
+ },
+ Tolerations: getToleration(r.opConfig.Parameters, pluginTolerationsEnv, []v1.Toleration{}),
+ },
+ Resources: csiopv1a1.NodePluginResourcesSpec{},
+ KubeletDirPath: CSIParam.KubeletDirPath,
+ EnableSeLinuxHostMount: &CSIParam.EnablePluginSelinuxHostMount,
+ },
+ ControllerPlugin: &csiopv1a1.ControllerPluginSpec{
+ PodCommonSpec: csiopv1a1.PodCommonSpec{
+ PrioritylClassName: &CSIParam.PluginPriorityClassName,
+ Affinity: &v1.Affinity{
+ NodeAffinity: getNodeAffinity(r.opConfig.Parameters, provisionerNodeAffinityEnv, &v1.NodeAffinity{}),
+ },
+ Tolerations: getToleration(r.opConfig.Parameters, provisionerTolerationsEnv, []v1.Toleration{}),
+ },
+ Replicas: &CSIParam.ProvisionerReplicas,
+ Resources: csiopv1a1.ControllerPluginResourcesSpec{},
+ },
+ DeployCsiAddons: &CSIParam.EnableCSIAddonsSideCar,
+ CephFsClientType: cephfsClientType,
+ },
+ }
+ if !reflect.DeepEqual(cluster.Spec.Network, cephv1.NetworkSpec{}) {
+
+ if cluster.Spec.Network.Connections.Encryption.Enabled {
+ opConfig.Spec.DriverSpecDefaults.Encryption = &csiopv1a1.EncryptionSpec{
+ ConfigMapRef: v1.LocalObjectReference{
+ Name: "rook-ceph-csi-kms-config",
+ },
+ }
+ }
+ }
+
+ return opConfig.Spec
+}
+
+func (r *ReconcileCSI) createImageSetConfigmap() (string, error) {
+
+ data := map[string]string{
+ "provisioner": CSIParam.ProvisionerImage,
+ "attacher": CSIParam.AttacherImage,
+ "resizer": CSIParam.ResizerImage,
+ "snapshotter": CSIParam.SnapshotterImage,
+ "registrar": CSIParam.RegistrarImage,
+ "plugin": CSIParam.CSIPluginImage,
+ "addons": CSIParam.CSIAddonsImage,
+ }
+
+ cm := &v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "rook-csi-operator-image-set-configmap",
+ Namespace: r.opConfig.OperatorNamespace,
+ },
+ Data: data,
+ }
+
+ err := r.client.Get(r.opManagerContext, types.NamespacedName{Name: cm.Name, Namespace: r.opConfig.OperatorNamespace}, cm)
+ if err != nil {
+ if kerrors.IsNotFound(err) {
+ err = r.client.Create(r.opManagerContext, cm)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to create imageSet cm %q for ceph-CSI operator-config CR %q", cm.Name, opConfigCRName)
+ }
+
+ logger.Infof("Successfully create imageSet cm %s for ceph-CSI operator-config CR %q", cm.Name, opConfigCRName)
+ return cm.Name, nil
+ }
+ return "", errors.Wrapf(err, "failed to get imageSet cm %q for ceph-CSI operator-config CR %q", cm.Name, opConfigCRName)
+ }
+
+ cm.Data = data
+ err = r.client.Update(r.opManagerContext, cm)
+ if err != nil {
+ return "", errors.Wrapf(err, "failed to updated imageSet cm %q ceph-CSI operator-config CR %q", cm.Name, opConfigCRName)
+ }
+ logger.Infof("Successfully updated imageSet cm %s for ceph-CSI operator-config CR %q", cm.Name, opConfigCRName)
+
+ return cm.Name, nil
+}
+
+func (r *ReconcileCSI) deleteImageSetConfigMap() error {
+ cm := &v1.ConfigMap{}
+ err := r.client.Get(r.opManagerContext, types.NamespacedName{Name: cm.Name, Namespace: r.opConfig.OperatorNamespace}, cm)
+ if err != nil {
+ if kerrors.IsNotFound(err) {
+ return nil
+ }
+ }
+ err = r.client.Delete(r.opManagerContext, cm)
+ if nil != err {
+ return errors.Wrapf(err, "failed to delete imageSet configMap %v", cm.Name)
+ }
+ logger.Infof("deleted imageSet configMap %q", cm.Name)
+
+ return nil
+}
diff --git a/pkg/operator/ceph/csi/operator_config_test.go b/pkg/operator/ceph/csi/operator_config_test.go
new file mode 100644
index 000000000000..4beb6a9a52f3
--- /dev/null
+++ b/pkg/operator/ceph/csi/operator_config_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "context"
+ "testing"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
+ "github.com/rook/rook/pkg/client/clientset/versioned/scheme"
+ "github.com/rook/rook/pkg/clusterd"
+ opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
+ testop "github.com/rook/rook/pkg/operator/test"
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// TestReconcileCSI_createOrUpdateOperatorConfig verifies that reconciling a
+// CephCluster creates the csi-operator OperatorConfig CR in the operator
+// namespace, and that the generated driver defaults reflect the cluster spec
+// (metadata disabled, encryption KMS configmap reference set).
+func TestReconcileCSI_createOrUpdateOperatorConfig(t *testing.T) {
+	ns := "test"
+	// Reconciler wired with fake clientsets; opManagerContext drives all client calls.
+	r := &ReconcileCSI{
+		context: &clusterd.Context{
+			Clientset:     testop.New(t, 1),
+			RookClientset: rookclient.NewSimpleClientset(),
+		},
+		opManagerContext: context.TODO(),
+		opConfig: opcontroller.OperatorConfig{
+			OperatorNamespace: "test",
+		},
+		clustersWithHolder: []ClusterDetail{},
+	}
+	// Cluster spec exercising read affinity, CephFS kernel mount options and
+	// network encryption, all of which feed into the OperatorConfig defaults.
+	cluster := &cephv1.CephCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testCluster",
+			Namespace: ns,
+		},
+		Spec: cephv1.ClusterSpec{
+			CSI: cephv1.CSIDriverSpec{
+				ReadAffinity: cephv1.ReadAffinitySpec{
+					Enabled:             true,
+					CrushLocationLabels: []string{"kubernetes.io/hostname"},
+				},
+				CephFS: cephv1.CSICephFSSpec{
+					KernelMountOptions: "ms_mode=crc",
+				},
+			},
+			Network: cephv1.NetworkSpec{
+				Connections: &cephv1.ConnectionsSpec{
+					Encryption: &cephv1.EncryptionSpec{
+						Enabled: true,
+					},
+				},
+			},
+		},
+	}
+	opConfig := &csiopv1a1.OperatorConfig{}
+
+	// Register operator types with the runtime scheme.
+	s := scheme.Scheme
+	s.AddKnownTypes(cephv1.SchemeGroupVersion, opConfig, &cephv1.CephCluster{}, &v1.ConfigMap{})
+	object := []runtime.Object{
+		cluster,
+	}
+	// Fake controller-runtime client pre-seeded with the CephCluster.
+	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+	r.client = cl
+
+	err := r.createOrUpdateOperatorConfig(*cluster)
+	assert.NoError(t, err)
+
+	// The OperatorConfig CR must now exist in the operator namespace with the
+	// expected driver defaults.
+	err = cl.Get(context.TODO(), types.NamespacedName{Name: opConfigCRName, Namespace: r.opConfig.OperatorNamespace}, opConfig)
+	assert.NoError(t, err)
+	assert.Equal(t, *opConfig.Spec.DriverSpecDefaults.EnableMetadata, false)
+	assert.Equal(t, opConfig.Spec.DriverSpecDefaults.Encryption.ConfigMapRef, v1.LocalObjectReference{Name: "rook-ceph-csi-kms-config"})
+}
diff --git a/pkg/operator/ceph/csi/operator_driver.go b/pkg/operator/ceph/csi/operator_driver.go
new file mode 100644
index 000000000000..e13f5afee147
--- /dev/null
+++ b/pkg/operator/ceph/csi/operator_driver.go
@@ -0,0 +1,417 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ "github.com/pkg/errors"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
+ v1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ k8scsiv1 "k8s.io/api/storage/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// createOrUpdateDriverResources creates or updates the csi-operator Driver CRs
+// (RBD, CephFS, NFS) for the given cluster, for each driver type that is
+// globally enabled. Before creating each Driver CR it annotates the existing
+// CSIDriver object so the csi-operator can take ownership of it.
+func (r *ReconcileCSI) createOrUpdateDriverResources(cluster cephv1.CephCluster, clusterInfo *cephclient.ClusterInfo) error {
+	if EnableRBD {
+		logger.Info("Creating RBD driver resources")
+		err := r.transferCSIDriverOwner(r.opManagerContext, clusterInfo, RBDDriverName)
+		if err != nil {
+			return errors.Wrapf(err, "failed to transfer ownership of the %q CSIDriver to the csi-operator", RBDDriverName)
+		}
+		err = r.createOrUpdateRBDDriverResource(cluster, clusterInfo)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create or update RBD driver resource in the namespace %q", clusterInfo.Namespace)
+		}
+	}
+	if EnableCephFS {
+		logger.Info("Creating CephFS driver resources")
+		err := r.transferCSIDriverOwner(r.opManagerContext, clusterInfo, CephFSDriverName)
+		if err != nil {
+			return errors.Wrapf(err, "failed to transfer ownership of the %q CSIDriver to the csi-operator", CephFSDriverName)
+		}
+		err = r.createOrUpdateCephFSDriverResource(cluster, clusterInfo)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create or update CephFS driver resource in the namespace %q", clusterInfo.Namespace)
+		}
+	}
+	if EnableNFS {
+		logger.Info("Creating NFS driver resources")
+		err := r.transferCSIDriverOwner(r.opManagerContext, clusterInfo, NFSDriverName)
+		if err != nil {
+			return errors.Wrapf(err, "failed to transfer ownership of the %q CSIDriver to the csi-operator", NFSDriverName)
+		}
+		err = r.createOrUpdateNFSDriverResource(cluster, clusterInfo)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create or update NFS driver resource in the namespace %q", clusterInfo.Namespace)
+		}
+	}
+
+	return nil
+}
+
+// createOrUpdateRBDDriverResource builds the csi-operator Driver CR for RBD
+// and creates or updates it in the cluster namespace.
+func (r *ReconcileCSI) createOrUpdateRBDDriverResource(cluster cephv1.CephCluster, clusterInfo *cephclient.ClusterInfo) error {
+	resourceName := fmt.Sprintf("%s.rbd.csi.ceph.com", clusterInfo.Namespace)
+	spec, err := r.generateDriverSpec(cluster.Name)
+	if err != nil {
+		return err
+	}
+
+	rbdDriver := &csiopv1a1.Driver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      resourceName,
+			Namespace: clusterInfo.Namespace,
+		},
+		Spec: spec,
+	}
+
+	// Bug fix: the provisioner (deployment) resource settings belong to the
+	// controller plugin and the plugin (daemonset) settings to the node plugin;
+	// the original passed them swapped. The provisioner resource list holds the
+	// provisioner/resizer/snapshotter/attacher containers matched by
+	// createDriverControllerPluginResources, and the plugin list holds the
+	// registrar container matched by createDriverNodePluginResouces.
+	rbdDriver.Spec.ControllerPlugin.Resources = createDriverControllerPluginResources(r.opConfig.Parameters, rbdProvisionerResource)
+	rbdDriver.Spec.Liveness = &csiopv1a1.LivenessSpec{
+		MetricsPort: int(CSIParam.RBDLivenessMetricsPort),
+	}
+	rbdDriver.Spec.NodePlugin.Resources = createDriverNodePluginResouces(r.opConfig.Parameters, rbdPluginResource)
+	// Default to rolling updates for the node plugin daemonset.
+	rbdDriver.Spec.NodePlugin.UpdateStrategy = &v1.DaemonSetUpdateStrategy{
+		Type: v1.RollingUpdateDaemonSetStrategyType,
+	}
+
+	if CSIParam.CSIDomainLabels != "" {
+		domainLabels := strings.Split(CSIParam.CSIDomainLabels, ",")
+		rbdDriver.Spec.NodePlugin.Topology = &csiopv1a1.TopologySpec{
+			DomainLabels: domainLabels,
+		}
+	}
+
+	if CSIParam.RBDPluginUpdateStrategy == "OnDelete" {
+		rbdDriver.Spec.NodePlugin.UpdateStrategy = &v1.DaemonSetUpdateStrategy{
+			Type: v1.OnDeleteDaemonSetStrategyType,
+		}
+	}
+
+	err = r.createOrUpdateDriverResource(clusterInfo, rbdDriver)
+	if err != nil {
+		// Typo fix: "RDB" -> "RBD".
+		return errors.Wrapf(err, "failed to create or update RBD driver resource %q", rbdDriver.Name)
+	}
+
+	return nil
+}
+
+// createOrUpdateCephFSDriverResource builds the csi-operator Driver CR for
+// CephFS and creates or updates it in the cluster namespace.
+func (r *ReconcileCSI) createOrUpdateCephFSDriverResource(cluster cephv1.CephCluster, clusterInfo *cephclient.ClusterInfo) error {
+	resourceName := fmt.Sprintf("%s.cephfs.csi.ceph.com", clusterInfo.Namespace)
+	spec, err := r.generateDriverSpec(cluster.Name)
+	if err != nil {
+		return err
+	}
+
+	cephFsDriver := &csiopv1a1.Driver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      resourceName,
+			Namespace: clusterInfo.Namespace,
+		},
+		Spec: spec,
+	}
+
+	cephFsDriver.Spec.SnapshotPolicy = csiopv1a1.NoneSnapshotPolicy
+	if CSIParam.EnableVolumeGroupSnapshot {
+		cephFsDriver.Spec.SnapshotPolicy = csiopv1a1.VolumeGroupSnapshotPolicy
+	}
+
+	// Bug fix: provisioner (deployment) resources belong to the controller
+	// plugin and plugin (daemonset) resources to the node plugin; the original
+	// passed them swapped.
+	cephFsDriver.Spec.ControllerPlugin.Resources = createDriverControllerPluginResources(r.opConfig.Parameters, cephFSProvisionerResource)
+	cephFsDriver.Spec.Liveness = &csiopv1a1.LivenessSpec{
+		MetricsPort: int(CSIParam.CephFSLivenessMetricsPort),
+	}
+
+	cephFsDriver.Spec.NodePlugin.Resources = createDriverNodePluginResouces(r.opConfig.Parameters, cephFSPluginResource)
+	cephFsDriver.Spec.NodePlugin.UpdateStrategy = &v1.DaemonSetUpdateStrategy{
+		Type: v1.RollingUpdateDaemonSetStrategyType,
+	}
+
+	if CSIParam.CSIDomainLabels != "" {
+		domainLabels := strings.Split(CSIParam.CSIDomainLabels, ",")
+		cephFsDriver.Spec.NodePlugin.Topology = &csiopv1a1.TopologySpec{
+			DomainLabels: domainLabels,
+		}
+	}
+
+	// Bug fix: consult the CephFS plugin update strategy setting, not the RBD one.
+	if CSIParam.CephFSPluginUpdateStrategy == "OnDelete" {
+		cephFsDriver.Spec.NodePlugin.UpdateStrategy = &v1.DaemonSetUpdateStrategy{
+			Type: v1.OnDeleteDaemonSetStrategyType,
+		}
+	}
+
+	err = r.createOrUpdateDriverResource(clusterInfo, cephFsDriver)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create or update cephFS driver resource %q", cephFsDriver.Name)
+	}
+
+	return nil
+}
+
+// createOrUpdateNFSDriverResource builds the csi-operator Driver CR for NFS
+// and creates or updates it in the cluster namespace.
+func (r *ReconcileCSI) createOrUpdateNFSDriverResource(cluster cephv1.CephCluster, clusterInfo *cephclient.ClusterInfo) error {
+	resourceName := fmt.Sprintf("%s.nfs.csi.ceph.com", clusterInfo.Namespace)
+	spec, err := r.generateDriverSpec(cluster.Name)
+	if err != nil {
+		return err
+	}
+
+	NFSDriver := &csiopv1a1.Driver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      resourceName,
+			Namespace: clusterInfo.Namespace,
+		},
+		Spec: spec,
+	}
+
+	// Bug fix: provisioner (deployment) resources belong to the controller
+	// plugin and plugin (daemonset) resources to the node plugin; the original
+	// passed them swapped.
+	NFSDriver.Spec.ControllerPlugin.Resources = createDriverControllerPluginResources(r.opConfig.Parameters, nfsProvisionerResource)
+
+	NFSDriver.Spec.NodePlugin.Resources = createDriverNodePluginResouces(r.opConfig.Parameters, nfsPluginResource)
+	NFSDriver.Spec.NodePlugin.UpdateStrategy = &v1.DaemonSetUpdateStrategy{
+		Type: v1.RollingUpdateDaemonSetStrategyType,
+	}
+
+	if CSIParam.CSIDomainLabels != "" {
+		domainLabels := strings.Split(CSIParam.CSIDomainLabels, ",")
+		NFSDriver.Spec.NodePlugin.Topology = &csiopv1a1.TopologySpec{
+			DomainLabels: domainLabels,
+		}
+	}
+
+	// Bug fix: consult the NFS plugin update strategy setting, not the RBD one.
+	if CSIParam.NFSPluginUpdateStrategy == "OnDelete" {
+		NFSDriver.Spec.NodePlugin.UpdateStrategy = &v1.DaemonSetUpdateStrategy{
+			Type: v1.OnDeleteDaemonSetStrategyType,
+		}
+	}
+
+	err = r.createOrUpdateDriverResource(clusterInfo, NFSDriver)
+	if err != nil {
+		return errors.Wrapf(err, "failed to create or update NFS driver resource %q", NFSDriver.Name)
+	}
+
+	return nil
+}
+
+// createOrUpdateDriverResource creates the given csi-operator Driver CR if it
+// does not exist yet, or updates its spec if it does.
+// Consistency fix: pointer receiver, matching every other ReconcileCSI method.
+func (r *ReconcileCSI) createOrUpdateDriverResource(clusterInfo *cephclient.ClusterInfo, driverResource *csiopv1a1.Driver) error {
+	// The Get below overwrites driverResource with the stored object, so keep
+	// the desired spec and name aside first.
+	spec := driverResource.Spec
+	name := driverResource.Name
+
+	err := r.client.Get(r.opManagerContext, types.NamespacedName{Name: name, Namespace: clusterInfo.Namespace}, driverResource)
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			err = r.client.Create(r.opManagerContext, driverResource)
+			if err != nil {
+				return errors.Wrapf(err, "failed to create CSI-operator driver CR %q", name)
+			}
+
+			logger.Infof("successfully created CSI driver cr %q", name)
+			return nil
+		}
+		// Bug fix: the original reported opConfigCRName here instead of the
+		// driver CR that actually failed.
+		return errors.Wrapf(err, "failed to get CSI-operator driver CR %q", name)
+	}
+
+	// CR already exists: overwrite the stored spec with the desired one.
+	driverResource.Spec = spec
+	err = r.client.Update(r.opManagerContext, driverResource)
+	if err != nil {
+		return errors.Wrapf(err, "failed to update CSI-operator driver CR %q", name)
+	}
+
+	logger.Infof("successfully updated CSI-operator driver resource %q", name)
+	return nil
+}
+
+// generateDriverSpec builds the common csi-operator DriverSpec shared by the
+// RBD, CephFS and NFS Driver CRs, creating (or refreshing) the imageSet
+// configMap the spec references.
+func (r *ReconcileCSI) generateDriverSpec(clusterName string) (csiopv1a1.DriverSpec, error) {
+	cephfsClientType := csiopv1a1.KernelCephFsClient
+	if CSIParam.ForceCephFSKernelClient == "false" {
+		cephfsClientType = csiopv1a1.AutoDetectCephFsClient
+	}
+	imageSetCmName, err := r.createImageSetConfigmap()
+	if err != nil {
+		return csiopv1a1.DriverSpec{}, errors.Wrapf(err, "failed to create ceph-CSI operator config ImageSetConfigmap for CR %s", opConfigCRName)
+	}
+
+	return csiopv1a1.DriverSpec{
+		Log: &csiopv1a1.LogSpec{
+			Verbosity: int(CSIParam.LogLevel),
+		},
+		ImageSet: &corev1.LocalObjectReference{
+			Name: imageSetCmName,
+		},
+		ClusterName:      &clusterName,
+		EnableMetadata:   &CSIParam.CSIEnableMetadata,
+		GenerateOMapInfo: &CSIParam.EnableOMAPGenerator,
+		FsGroupPolicy:    k8scsiv1.FileFSGroupPolicy,
+		NodePlugin: &csiopv1a1.NodePluginSpec{
+			PodCommonSpec: csiopv1a1.PodCommonSpec{
+				// Bug fix: the node plugin (daemonset) takes the plugin priority
+				// class; the original had the provisioner/plugin priority class
+				// names swapped between the node and controller plugins, the
+				// opposite of the affinity/toleration mapping below.
+				// (PrioritylClassName is the upstream API's field spelling.)
+				PrioritylClassName: &CSIParam.PluginPriorityClassName,
+				Affinity: &corev1.Affinity{
+					NodeAffinity: getNodeAffinity(r.opConfig.Parameters, pluginNodeAffinityEnv, &corev1.NodeAffinity{}),
+				},
+				Tolerations: getToleration(r.opConfig.Parameters, pluginTolerationsEnv, []corev1.Toleration{}),
+			},
+			Resources:              csiopv1a1.NodePluginResourcesSpec{},
+			KubeletDirPath:         CSIParam.KubeletDirPath,
+			EnableSeLinuxHostMount: &CSIParam.EnablePluginSelinuxHostMount,
+		},
+		ControllerPlugin: &csiopv1a1.ControllerPluginSpec{
+			PodCommonSpec: csiopv1a1.PodCommonSpec{
+				// Controller plugin (provisioner deployment) takes the
+				// provisioner priority class (see swap note above).
+				PrioritylClassName: &CSIParam.ProvisionerPriorityClassName,
+				Affinity: &corev1.Affinity{
+					NodeAffinity: getNodeAffinity(r.opConfig.Parameters, provisionerNodeAffinityEnv, &corev1.NodeAffinity{}),
+				},
+				Tolerations: getToleration(r.opConfig.Parameters, provisionerTolerationsEnv, []corev1.Toleration{}),
+			},
+			Replicas:  &CSIParam.ProvisionerReplicas,
+			Resources: csiopv1a1.ControllerPluginResourcesSpec{},
+		},
+		DeployCsiAddons:  &CSIParam.EnableCSIAddonsSideCar,
+		CephFsClientType: cephfsClientType,
+	}, nil
+}
+
+// createDriverControllerPluginResources translates the compute-resource
+// settings stored in the operator config under the given key into the
+// csi-operator controller-plugin resources spec. Containers are matched to
+// spec fields by substring of their configured name; entries with an empty
+// ResourceRequirements are skipped.
+func createDriverControllerPluginResources(opConfig map[string]string, key string) csiopv1a1.ControllerPluginResourcesSpec {
+	out := csiopv1a1.ControllerPluginResourcesSpec{}
+	// clone copies the limits/requests of one configured entry into a fresh
+	// ResourceRequirements pointer for the spec.
+	clone := func(rr corev1.ResourceRequirements) *corev1.ResourceRequirements {
+		return &corev1.ResourceRequirements{
+			Limits:   rr.Limits,
+			Requests: rr.Requests,
+		}
+	}
+
+	for _, res := range getComputeResource(opConfig, key) {
+		if reflect.DeepEqual(res.Resource, corev1.ResourceRequirements{}) {
+			continue
+		}
+		switch {
+		case strings.Contains(res.Name, "provisioner"):
+			out.Provisioner = clone(res.Resource)
+		case strings.Contains(res.Name, "resizer"):
+			out.Resizer = clone(res.Resource)
+		case strings.Contains(res.Name, "snapshotter"):
+			out.Snapshotter = clone(res.Resource)
+		case strings.Contains(res.Name, "attacher"):
+			out.Attacher = clone(res.Resource)
+		case strings.Contains(res.Name, "plugin"):
+			out.Plugin = clone(res.Resource)
+		case strings.Contains(res.Name, "omap-generator"):
+			out.OMapGenerator = clone(res.Resource)
+		case strings.Contains(res.Name, "liveness"):
+			out.Liveness = clone(res.Resource)
+		case strings.Contains(res.Name, "addons"):
+			out.Addons = clone(res.Resource)
+		}
+	}
+	return out
+}
+
+// createDriverNodePluginResouces translates the compute-resource settings
+// stored in the operator config under the given key into the csi-operator
+// node-plugin resources spec. Containers are matched to spec fields by
+// substring of their configured name; empty entries are skipped.
+// (Function name keeps the existing "Resouces" spelling used by callers.)
+func createDriverNodePluginResouces(opConfig map[string]string, key string) csiopv1a1.NodePluginResourcesSpec {
+	out := csiopv1a1.NodePluginResourcesSpec{}
+	// clone copies limits/requests into a fresh ResourceRequirements pointer.
+	clone := func(rr corev1.ResourceRequirements) *corev1.ResourceRequirements {
+		return &corev1.ResourceRequirements{
+			Limits:   rr.Limits,
+			Requests: rr.Requests,
+		}
+	}
+
+	for _, res := range getComputeResource(opConfig, key) {
+		if reflect.DeepEqual(res.Resource, corev1.ResourceRequirements{}) {
+			continue
+		}
+		switch {
+		case strings.Contains(res.Name, "registrar"):
+			out.Registrar = clone(res.Resource)
+		case strings.Contains(res.Name, "plugin"):
+			out.Plugin = clone(res.Resource)
+		case strings.Contains(res.Name, "liveness"):
+			out.Liveness = clone(res.Resource)
+		case strings.Contains(res.Name, "addons"):
+			out.Addons = clone(res.Resource)
+		}
+	}
+	return out
+}
+
+// transferCSIDriverOwner annotates the named CSIDriver object with an owner
+// reference key so the csi-operator can adopt it. A CSIDriver that does not
+// exist is skipped without error. The clusterInfo parameter is currently
+// unused but kept for signature parity with the other transfer helpers.
+func (r *ReconcileCSI) transferCSIDriverOwner(ctx context.Context, clusterInfo *cephclient.ClusterInfo, name string) error {
+	logger.Info("adding annotation to CSIDriver resource for csi-operator to own it")
+	csiDriver, err := r.context.Clientset.StorageV1().CSIDrivers().Get(ctx, name, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			logger.Debugf("%s CSIDriver not found; skipping ownership transfer.", name)
+			return nil
+		}
+		// Bug fix: the original swallowed non-NotFound errors and proceeded to
+		// use the invalid csiDriver object.
+		return errors.Wrapf(err, "failed to get CSIDriver %q", name)
+	}
+
+	key := "csi.ceph.io/ownerref"
+	ownerObjKey := client.ObjectKeyFromObject(csiDriver)
+	val, err := json.Marshal(ownerObjKey)
+	if err != nil {
+		return errors.Wrapf(err, "failed to marshal owner object key %q", ownerObjKey.Name)
+	}
+
+	annotations := csiDriver.GetAnnotations()
+	if annotations == nil {
+		annotations = map[string]string{}
+		csiDriver.SetAnnotations(annotations)
+	}
+	if oldValue, exist := annotations[key]; exist && oldValue == string(val) {
+		// Annotation already current; nothing to update.
+		return nil
+	}
+	annotations[key] = string(val)
+
+	_, err = r.context.Clientset.StorageV1().CSIDrivers().Update(ctx, csiDriver, metav1.UpdateOptions{})
+	if err != nil {
+		return errors.Wrapf(err, "failed to update CSIDriver %s", name)
+	}
+
+	return nil
+}
diff --git a/pkg/operator/ceph/csi/operator_driver_test.go b/pkg/operator/ceph/csi/operator_driver_test.go
new file mode 100644
index 000000000000..c1c0c2831c3b
--- /dev/null
+++ b/pkg/operator/ceph/csi/operator_driver_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2024 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csi
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+ rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
+ "github.com/rook/rook/pkg/client/clientset/versioned/scheme"
+ "github.com/rook/rook/pkg/clusterd"
+ clienttest "github.com/rook/rook/pkg/daemon/ceph/client/test"
+ opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
+ testop "github.com/rook/rook/pkg/operator/test"
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// TestReconcileCSI_createOrUpdateDriverResources verifies that, with all three
+// driver types enabled, reconciling a CephCluster creates the csi-operator
+// Driver CRs for RBD, CephFS and NFS in the cluster namespace.
+// NOTE(review): this test mutates the package-level EnableRBD/EnableCephFS/
+// EnableNFS globals and does not restore them afterwards.
+func TestReconcileCSI_createOrUpdateDriverResources(t *testing.T) {
+	ns := "test"
+	// Reconciler wired with fake clientsets; opManagerContext drives all client calls.
+	r := &ReconcileCSI{
+		context: &clusterd.Context{
+			Clientset:     testop.New(t, 1),
+			RookClientset: rookclient.NewSimpleClientset(),
+		},
+		opManagerContext: context.TODO(),
+		opConfig: opcontroller.OperatorConfig{
+			OperatorNamespace: "test",
+		},
+		clustersWithHolder: []ClusterDetail{},
+	}
+	cluster := &cephv1.CephCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testCluster",
+			Namespace: ns,
+		},
+		Spec: cephv1.ClusterSpec{
+			CSI: cephv1.CSIDriverSpec{
+				ReadAffinity: cephv1.ReadAffinitySpec{
+					Enabled:             true,
+					CrushLocationLabels: []string{"kubernetes.io/hostname"},
+				},
+				CephFS: cephv1.CSICephFSSpec{
+					KernelMountOptions: "ms_mode=crc",
+				},
+			},
+			Network: cephv1.NetworkSpec{
+				Connections: &cephv1.ConnectionsSpec{
+					Encryption: &cephv1.EncryptionSpec{
+						Enabled: true,
+					},
+				},
+			},
+		},
+	}
+	driver := &csiopv1a1.Driver{}
+
+	// Register operator types with the runtime scheme.
+	s := scheme.Scheme
+	s.AddKnownTypes(cephv1.SchemeGroupVersion, driver, &cephv1.CephCluster{}, &v1.ConfigMap{})
+	object := []runtime.Object{
+		cluster,
+	}
+	// Fake controller-runtime client pre-seeded with the CephCluster.
+	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+	r.client = cl
+
+	// Enable all three drivers so every Driver CR should be created.
+	EnableRBD = true
+	EnableCephFS = true
+	EnableNFS = true
+
+	c := clienttest.CreateTestClusterInfo(3)
+	c.Namespace = ns
+	c.SetName("testcluster")
+	c.NamespacedName()
+
+	err := r.createOrUpdateDriverResources(*cluster, c)
+	assert.NoError(t, err)
+
+	// One Driver CR per driver type, named <namespace>.<type>.csi.ceph.com.
+	err = cl.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s.rbd.csi.ceph.com", c.Namespace), Namespace: ns}, driver)
+	assert.NoError(t, err)
+
+	err = cl.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s.cephfs.csi.ceph.com", c.Namespace), Namespace: ns}, driver)
+	assert.NoError(t, err)
+
+	err = cl.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s.nfs.csi.ceph.com", c.Namespace), Namespace: ns}, driver)
+	assert.NoError(t, err)
+}
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index 8d2d7cc0e3dc..631b0446f01c 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -25,6 +25,7 @@ import (
"strings"
"time"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/operator/ceph/cluster/telemetry"
opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
"github.com/rook/rook/pkg/operator/k8sutil"
@@ -41,6 +42,7 @@ import (
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
cephcsi "github.com/ceph/ceph-csi/api/deploy/kubernetes"
)
@@ -129,6 +131,7 @@ var (
EnableRBD = false
EnableCephFS = false
EnableNFS = false
+ enableCSIOperator = false
AllowUnsupported = false
CustomCSICephConfigExists = false
@@ -148,13 +151,13 @@ var (
// manually challenging.
var (
// image names
- DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.11.0"
- DefaultRegistrarImage = "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1"
- DefaultProvisionerImage = "registry.k8s.io/sig-storage/csi-provisioner:v4.0.1"
- DefaultAttacherImage = "registry.k8s.io/sig-storage/csi-attacher:v4.5.1"
- DefaultSnapshotterImage = "registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2"
- DefaultResizerImage = "registry.k8s.io/sig-storage/csi-resizer:v1.10.1"
- DefaultCSIAddonsImage = "quay.io/csiaddons/k8s-sidecar:v0.8.0"
+ DefaultCSIPluginImage = "quay.io/cephcsi/cephcsi:v3.12.0"
+ DefaultRegistrarImage = "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.11.1"
+ DefaultProvisionerImage = "registry.k8s.io/sig-storage/csi-provisioner:v5.0.1"
+ DefaultAttacherImage = "registry.k8s.io/sig-storage/csi-attacher:v4.6.1"
+ DefaultSnapshotterImage = "registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1"
+ DefaultResizerImage = "registry.k8s.io/sig-storage/csi-resizer:v1.11.1"
+ DefaultCSIAddonsImage = "quay.io/csiaddons/k8s-sidecar:v0.9.0"
// image pull policy
DefaultCSIImagePullPolicy = string(corev1.PullIfNotPresent)
@@ -726,8 +729,8 @@ func (r *ReconcileCSI) stopDrivers(ver *version.Info) error {
CephFSDriverName = fmt.Sprintf("%s.cephfs.csi.ceph.com", r.opConfig.OperatorNamespace)
NFSDriverName = fmt.Sprintf("%s.nfs.csi.ceph.com", r.opConfig.OperatorNamespace)
- if !EnableRBD {
- logger.Info("CSI Ceph RBD driver disabled")
+ if !EnableRBD || EnableCSIOperator() {
+ logger.Debugf("either EnableRBD if `false` or EnableCSIOperator is `true`, `EnableRBD is %t` and `EnableCSIOperator is %t", EnableRBD, EnableCSIOperator())
err := r.deleteCSIDriverResources(ver, CsiRBDPlugin, csiRBDProvisioner, "csi-rbdplugin-metrics", RBDDriverName)
if err != nil {
return errors.Wrap(err, "failed to remove CSI Ceph RBD driver")
@@ -735,8 +738,8 @@ func (r *ReconcileCSI) stopDrivers(ver *version.Info) error {
logger.Info("successfully removed CSI Ceph RBD driver")
}
- if !EnableCephFS {
- logger.Info("CSI CephFS driver disabled")
+ if !EnableCephFS || EnableCSIOperator() {
+ logger.Debugf("either EnableCephFS if `false` or EnableCSIOperator is `true`, `EnableCephFS is %t` and `EnableCSIOperator is %t", EnableRBD, EnableCSIOperator())
err := r.deleteCSIDriverResources(ver, CsiCephFSPlugin, csiCephFSProvisioner, "csi-cephfsplugin-metrics", CephFSDriverName)
if err != nil {
return errors.Wrap(err, "failed to remove CSI CephFS driver")
@@ -744,8 +747,8 @@ func (r *ReconcileCSI) stopDrivers(ver *version.Info) error {
logger.Info("successfully removed CSI CephFS driver")
}
- if !EnableNFS {
- logger.Info("CSI NFS driver disabled")
+ if !EnableNFS || EnableCSIOperator() {
+ logger.Debugf("either EnableNFS if `false` or EnableCSIOperator is `true`, `EnableNFS is %t` and `EnableCSIOperator is %t", EnableRBD, EnableCSIOperator())
err := r.deleteCSIDriverResources(ver, CsiNFSPlugin, csiNFSProvisioner, "csi-nfsplugin-metrics", NFSDriverName)
if err != nil {
return errors.Wrap(err, "failed to remove CSI NFS driver")
@@ -756,6 +759,48 @@ func (r *ReconcileCSI) stopDrivers(ver *version.Info) error {
return nil
}
+// deleteCSIOperatorResources cleans up csi-operator owned resources. The
+// CephConnections and ClientProfiles in clusterNamespace and the imageSet
+// configMap are always removed; when deleteOp is true the Driver CRs and the
+// OperatorConfig in the operator namespace are removed as well. Failures are
+// logged rather than returned, so the cleanup is best-effort.
+func (r *ReconcileCSI) deleteCSIOperatorResources(clusterNamespace string, deleteOp bool) {
+	// Bug fix: the original logged the Name of freshly-allocated (empty)
+	// objects, which always printed "". Log the namespace being cleaned instead.
+	err := r.client.DeleteAllOf(r.opManagerContext, &csiopv1a1.CephConnection{}, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: clusterNamespace}})
+	if err != nil && !kerrors.IsNotFound(err) {
+		logger.Errorf("failed to delete CSI-operator CephConnections in namespace %q. %v", clusterNamespace, err)
+	} else {
+		logger.Infof("deleted CSI-operator CephConnections in namespace %q", clusterNamespace)
+	}
+
+	err = r.client.DeleteAllOf(r.opManagerContext, &csiopv1a1.ClientProfile{}, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: clusterNamespace}})
+	if err != nil && !kerrors.IsNotFound(err) {
+		logger.Errorf("failed to delete CSI-operator ClientProfiles in namespace %q. %v", clusterNamespace, err)
+	} else {
+		logger.Infof("deleted CSI-operator ClientProfiles in namespace %q", clusterNamespace)
+	}
+
+	err = r.deleteImageSetConfigMap()
+	if err != nil && !kerrors.IsNotFound(err) {
+		// Bug fix: use Errorf so the error is formatted, not passed as a stray arg.
+		logger.Errorf("failed to delete imageSetConfigMap. %v", err)
+	}
+
+	if deleteOp {
+		err = r.client.DeleteAllOf(r.opManagerContext, &csiopv1a1.Driver{}, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: r.opConfig.OperatorNamespace}})
+		if err != nil && !kerrors.IsNotFound(err) {
+			logger.Errorf("failed to delete CSI-operator Driver CRs in namespace %q. %v", r.opConfig.OperatorNamespace, err)
+		} else {
+			logger.Infof("deleted CSI-operator Driver CRs in namespace %q", r.opConfig.OperatorNamespace)
+		}
+
+		err = r.client.DeleteAllOf(r.opManagerContext, &csiopv1a1.OperatorConfig{}, &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Namespace: r.opConfig.OperatorNamespace}})
+		if err != nil && !kerrors.IsNotFound(err) {
+			logger.Errorf("failed to delete CSI-operator OperatorConfig in namespace %q. %v", r.opConfig.OperatorNamespace, err)
+		} else {
+			logger.Infof("deleted CSI-operator OperatorConfig in namespace %q", r.opConfig.OperatorNamespace)
+		}
+	}
+}
+
func (r *ReconcileCSI) deleteCSIDriverResources(ver *version.Info, daemonset, deployment, service, driverName string) error {
csiDriverobj := v1CsiDriver{}
err := k8sutil.DeleteDaemonset(r.opManagerContext, r.context.Clientset, r.opConfig.OperatorNamespace, daemonset)
@@ -773,10 +818,13 @@ func (r *ReconcileCSI) deleteCSIDriverResources(ver *version.Info, daemonset, de
return errors.Wrapf(err, "failed to delete the %q", service)
}
- err = csiDriverobj.deleteCSIDriverInfo(r.opManagerContext, r.context.Clientset, driverName)
- if err != nil {
- return errors.Wrapf(err, "failed to delete %q Driver Info", driverName)
+ if !EnableCSIOperator() {
+ err = csiDriverobj.deleteCSIDriverInfo(r.opManagerContext, r.context.Clientset, driverName)
+ if err != nil {
+ return errors.Wrapf(err, "failed to delete %q Driver Info", driverName)
+ }
}
+
return nil
}
@@ -827,6 +875,10 @@ func (r *ReconcileCSI) validateCSIVersion(ownerInfo *k8sutil.OwnerInfo) (*CephCS
job.Spec.Template.Spec.Affinity = &corev1.Affinity{
NodeAffinity: getNodeAffinity(r.opConfig.Parameters, provisionerNodeAffinityEnv, &corev1.NodeAffinity{}),
}
+ if r.firstCephCluster != nil {
+ cephv1.GetCmdReporterAnnotations(r.firstCephCluster.Annotations).ApplyToObjectMeta(&job.Spec.Template.ObjectMeta)
+ cephv1.GetCmdReporterLabels(r.firstCephCluster.Labels).ApplyToObjectMeta(&job.Spec.Template.ObjectMeta)
+ }
stdout, _, retcode, err := versionReporter.Run(r.opManagerContext, timeout)
if err != nil {
@@ -1092,3 +1144,7 @@ func getPrefixFromArg(arg string) (string, bool) {
}
return "", false
}
+
+// EnableCSIOperator reports whether the csi-operator based deployment should
+// be used: the operator must be enabled via configuration and the holder pods
+// must not be in use (the two modes are mutually exclusive).
+func EnableCSIOperator() bool {
+	return enableCSIOperator && !IsHolderEnabled()
+}
diff --git a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
index d8ed72575272..ed25616151ed 100644
--- a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
@@ -38,9 +38,7 @@ spec:
- "--extra-create-metadata=true"
- "--prevent-volume-mode-conversion=true"
- "--feature-gates=HonorPVReclaimPolicy=true"
- {{ if .EnableCSITopology }}
- - "--feature-gates=Topology=true"
- {{ end }}
+ - "--feature-gates=Topology={{ .EnableCSITopology }}"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
diff --git a/pkg/operator/ceph/csi/util_test.go b/pkg/operator/ceph/csi/util_test.go
index 28b4f8896d10..2308d426e14d 100644
--- a/pkg/operator/ceph/csi/util_test.go
+++ b/pkg/operator/ceph/csi/util_test.go
@@ -284,7 +284,7 @@ func Test_getImage(t *testing.T) {
args: args{
data: map[string]string{},
settingName: "ROOK_CSI_CEPH_IMAGE",
- defaultImage: "quay.io/cephcsi/cephcsi:v3.11.0",
+ defaultImage: "quay.io/cephcsi/cephcsi:v3.12.0",
},
want: DefaultCSIPluginImage,
},
diff --git a/pkg/operator/ceph/csi/version.go b/pkg/operator/ceph/csi/version.go
index a3447f4bf4e1..aa4d9bb26050 100644
--- a/pkg/operator/ceph/csi/version.go
+++ b/pkg/operator/ceph/csi/version.go
@@ -26,13 +26,13 @@ import (
var (
//minimum supported version is 3.10.0
- minimum = CephCSIVersion{3, 10, 0}
+ minimum = CephCSIVersion{3, 11, 0}
//supportedCSIVersions are versions that rook supports
- releasev311 = CephCSIVersion{3, 11, 0}
+ releasev312 = CephCSIVersion{3, 12, 0}
supportedCSIVersions = []CephCSIVersion{
minimum,
- releasev311,
+ releasev312,
}
// for parsing the output of `cephcsi`
diff --git a/pkg/operator/ceph/csi/version_test.go b/pkg/operator/ceph/csi/version_test.go
index 9ccb79d4b2e0..7bd518c6b0d6 100644
--- a/pkg/operator/ceph/csi/version_test.go
+++ b/pkg/operator/ceph/csi/version_test.go
@@ -23,13 +23,14 @@ import (
)
var (
- testMinVersion = CephCSIVersion{3, 10, 0}
+ testMinVersion = CephCSIVersion{3, 11, 0}
testReleaseV390 = CephCSIVersion{3, 9, 0}
testReleaseV391 = CephCSIVersion{3, 9, 1}
testreleasev310 = CephCSIVersion{3, 10, 0}
testReleaseV3101 = CephCSIVersion{3, 10, 1}
testReleaseV3102 = CephCSIVersion{3, 10, 2}
testReleaseV3110 = CephCSIVersion{3, 11, 0}
+ testReleaseV3120 = CephCSIVersion{3, 12, 0}
testVersionUnsupported = CephCSIVersion{4, 0, 0}
)
@@ -68,6 +69,10 @@ func TestIsAtLeast(t *testing.T) {
ret = testReleaseV3110.isAtLeast(&testReleaseV3110)
assert.Equal(t, true, ret)
+ // Test for 3.12.0
+ ret = testReleaseV3120.isAtLeast(&testReleaseV3120)
+ assert.Equal(t, true, ret)
+
}
func TestSupported(t *testing.T) {
@@ -86,13 +91,16 @@ func TestSupported(t *testing.T) {
assert.Equal(t, false, ret)
ret = testreleasev310.Supported()
- assert.Equal(t, true, ret)
+ assert.Equal(t, false, ret)
ret = testReleaseV3101.Supported()
- assert.Equal(t, true, ret)
+ assert.Equal(t, false, ret)
ret = testReleaseV3110.Supported()
assert.Equal(t, true, ret)
+
+ ret = testReleaseV3120.Supported()
+ assert.Equal(t, true, ret)
}
func Test_extractCephCSIVersion(t *testing.T) {
diff --git a/pkg/operator/ceph/file/subvolumegroup/controller.go b/pkg/operator/ceph/file/subvolumegroup/controller.go
index cf7fb7564f08..5e7cb80f7ef9 100644
--- a/pkg/operator/ceph/file/subvolumegroup/controller.go
+++ b/pkg/operator/ceph/file/subvolumegroup/controller.go
@@ -25,6 +25,7 @@ import (
"syscall"
"time"
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
"github.com/pkg/errors"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/pkg/util/exec"
@@ -106,6 +107,11 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}
+ err = csiopv1a1.AddToScheme(mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+
return nil
}
@@ -265,6 +271,14 @@ func (r *ReconcileCephFilesystemSubVolumeGroup) reconcile(request reconcile.Requ
}
r.updateStatus(observedGeneration, request.NamespacedName, cephv1.ConditionReady)
+
+ if csi.EnableCSIOperator() {
+ err = csi.CreateUpdateClientProfileSubVolumeGroup(r.clusterInfo.Context, r.client, r.clusterInfo, cephFilesystemNamespacedName, buildClusterID(cephFilesystemSubVolumeGroup), cephCluster.Name)
+ if err != nil {
+ return reconcile.Result{}, errors.Wrap(err, "failed to create ceph csi-op config CR for subVolGrp ns")
+ }
+ }
+
// Return and do not requeue
logger.Debugf("done reconciling cephFilesystemSubVolumeGroup %q", namespacedName)
return reconcile.Result{}, nil
diff --git a/pkg/operator/ceph/object/admin.go b/pkg/operator/ceph/object/admin.go
index 9cc3b3612e7b..5e5a2596f515 100644
--- a/pkg/operator/ceph/object/admin.go
+++ b/pkg/operator/ceph/object/admin.go
@@ -116,7 +116,7 @@ func NewMultisiteContext(context *clusterd.Context, clusterInfo *cephclient.Clus
objContext := NewContext(context, clusterInfo, store.Name)
objContext.UID = string(store.UID)
- if err := UpdateEndpoint(objContext, store); err != nil {
+ if err := UpdateEndpointForAdminOps(objContext, store); err != nil {
return nil, err
}
@@ -131,16 +131,26 @@ func NewMultisiteContext(context *clusterd.Context, clusterInfo *cephclient.Clus
return objContext, nil
}
-// UpdateEndpoint updates an object.Context using the latest info from the CephObjectStore spec
-func UpdateEndpoint(objContext *Context, store *cephv1.CephObjectStore) error {
- nsName := fmt.Sprintf("%s/%s", objContext.clusterInfo.Namespace, objContext.Name)
+// GetAdminOpsEndpoint returns an endpoint that can be used to perform RGW admin ops
+func GetAdminOpsEndpoint(s *cephv1.CephObjectStore) (string, error) {
+ nsName := fmt.Sprintf("%s/%s", s.Namespace, s.Name)
- port, err := store.Spec.GetPort()
+ // advertise endpoint should be most likely to have a valid cert, so use it for admin ops
+ endpoint, err := s.GetAdvertiseEndpointUrl()
if err != nil {
- return errors.Wrapf(err, "failed to get port for object store %q", nsName)
+ return "", errors.Wrapf(err, "failed to get advertise endpoint for object store %q", nsName)
}
- objContext.Endpoint = BuildDNSEndpoint(GetDomainName(store), port, store.Spec.IsTLSEnabled())
+ return endpoint, nil
+}
+// UpdateEndpointForAdminOps updates the object.Context endpoint with the latest admin ops endpoint
+// for the CephObjectStore.
+func UpdateEndpointForAdminOps(objContext *Context, store *cephv1.CephObjectStore) error {
+ endpoint, err := GetAdminOpsEndpoint(store)
+ if err != nil {
+ return err
+ }
+ objContext.Endpoint = endpoint
return nil
}
diff --git a/pkg/operator/ceph/object/admin_test.go b/pkg/operator/ceph/object/admin_test.go
index a93ed0ef4960..122b4ce86ef1 100644
--- a/pkg/operator/ceph/object/admin_test.go
+++ b/pkg/operator/ceph/object/admin_test.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/pkg/errors"
+ cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
@@ -29,6 +30,7 @@ import (
"github.com/rook/rook/pkg/util/exec"
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestExtractJson(t *testing.T) {
@@ -730,3 +732,135 @@ const secondPeriodUpdateWithChanges = `{
"realm_name": "my-store",
"realm_epoch": 3
}`
+
+func TestGetAdminOpsEndpoint(t *testing.T) {
+ s := &cephv1.CephObjectStore{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-store",
+ Namespace: "my-ns",
+ },
+ Spec: cephv1.ObjectStoreSpec{
+ Gateway: cephv1.GatewaySpec{},
+ Hosting: &cephv1.ObjectStoreHostingSpec{
+ // dnsNames shouldn't affect admin ops endpoints
+ DNSNames: []string{"should.not.appear"},
+ },
+ },
+ }
+
+ t.Run("internal", func(t *testing.T) {
+ t.Run("port", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.Port = 8080
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "http://rook-ceph-rgw-my-store.my-ns.svc:8080", got)
+ })
+
+ t.Run("securePort, no cert", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.SecurePort = 8443
+ got, err := GetAdminOpsEndpoint(s)
+ assert.Error(t, err)
+ assert.Equal(t, "", got)
+ })
+
+ t.Run("securePort", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "https://rook-ceph-rgw-my-store.my-ns.svc:8443", got)
+ })
+
+ t.Run("port + securePort", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.Port = 8080
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "https://rook-ceph-rgw-my-store.my-ns.svc:8443", got)
+ })
+ })
+
+ t.Run("external", func(t *testing.T) {
+ t.Run("port", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.ExternalRgwEndpoints = []cephv1.EndpointAddress{
+ {IP: "192.168.1.1"},
+ {Hostname: "s3.host.com"},
+ }
+ s.Spec.Gateway.Port = 8080
+
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "http://192.168.1.1:8080", got)
+ })
+
+ t.Run("securePort, no cert", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.ExternalRgwEndpoints = []cephv1.EndpointAddress{
+ {IP: "192.168.1.1"},
+ {Hostname: "s3.host.com"},
+ }
+ s.Spec.Gateway.SecurePort = 8443
+
+ got, err := GetAdminOpsEndpoint(s)
+ assert.Error(t, err)
+ assert.Equal(t, "", got)
+ })
+
+ t.Run("securePort", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.ExternalRgwEndpoints = []cephv1.EndpointAddress{
+ {IP: "192.168.1.1"},
+ {Hostname: "s3.host.com"},
+ }
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "https://192.168.1.1:8443", got)
+ })
+
+ t.Run("port + securePort", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.ExternalRgwEndpoints = []cephv1.EndpointAddress{
+ {IP: "192.168.1.1"},
+ {Hostname: "s3.host.com"},
+ }
+ s.Spec.Gateway.Port = 8080
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "https://192.168.1.1:8443", got)
+ })
+ })
+
+ t.Run("advertise", func(t *testing.T) {
+ t.Run("port + securePort", func(t *testing.T) {
+ s := s.DeepCopy()
+ s.Spec.Gateway.ExternalRgwEndpoints = []cephv1.EndpointAddress{
+ {IP: "192.168.1.1"},
+ {Hostname: "s3.host.com"},
+ }
+ s.Spec.Gateway.Port = 8080
+ s.Spec.Gateway.SecurePort = 8443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+ s.Spec.Hosting.AdvertiseEndpoint = &cephv1.ObjectEndpointSpec{
+ DnsName: "advertise.me",
+ Port: 80,
+ UseTls: false,
+ }
+
+ got, err := GetAdminOpsEndpoint(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "http://advertise.me:80", got)
+ })
+ })
+}
diff --git a/pkg/operator/ceph/object/bucket/provisioner.go b/pkg/operator/ceph/object/bucket/provisioner.go
index b9b4c41b63c2..c42af8a5fc3b 100644
--- a/pkg/operator/ceph/object/bucket/provisioner.go
+++ b/pkg/operator/ceph/object/bucket/provisioner.go
@@ -504,23 +504,21 @@ func (p *Provisioner) setObjectContext() error {
// setObjectStoreDomainName sets the provisioner.storeDomainName and provisioner.port
// must be called after setObjectStoreName and setObjectStoreNamespace
-func (p *Provisioner) setObjectStoreDomainName(sc *storagev1.StorageClass) error {
+func (p *Provisioner) setObjectStoreDomainNameAndPort(sc *storagev1.StorageClass) error {
// make sure the object store actually exists
store, err := p.getObjectStore()
if err != nil {
return err
}
- p.storeDomainName = object.GetDomainName(store)
- return nil
-}
-func (p *Provisioner) setObjectStorePort() error {
- store, err := p.getObjectStore()
+ domainName, port, _, err := store.GetAdvertiseEndpoint()
if err != nil {
- return errors.Wrap(err, "failed to get cephObjectStore")
+ return errors.Wrapf(err, `failed to get advertise endpoint for CephObjectStore "%s/%s"`, p.clusterInfo.Namespace, p.objectStoreName)
}
- p.storePort, err = store.Spec.GetPort()
- return err
+ p.storeDomainName = domainName
+ p.storePort = port
+
+ return nil
}
func (p *Provisioner) setObjectStoreName(sc *storagev1.StorageClass) {
@@ -560,12 +558,9 @@ func (p *Provisioner) populateDomainAndPort(sc *storagev1.StorageClass) error {
}
// If no endpoint exists let's see if CephObjectStore exists
} else {
- if err := p.setObjectStoreDomainName(sc); err != nil {
+ if err := p.setObjectStoreDomainNameAndPort(sc); err != nil {
return errors.Wrap(err, "failed to set object store domain name")
}
- if err := p.setObjectStorePort(); err != nil {
- return errors.Wrap(err, "failed to set object store port")
- }
}
return nil
@@ -679,8 +674,10 @@ func (p *Provisioner) setAdminOpsAPIClient() error {
return errors.Wrap(err, "failed to retrieve rgw admin ops user")
}
- // Build endpoint
- s3endpoint := object.BuildDNSEndpoint(object.GetDomainName(cephObjectStore), p.storePort, cephObjectStore.Spec.IsTLSEnabled())
+ s3endpoint, err := object.GetAdminOpsEndpoint(cephObjectStore)
+ if err != nil {
+ return errors.Wrapf(err, "failed to retrieve admin ops endpoint")
+ }
// If DEBUG level is set we will mutate the HTTP client for printing request and response
if logger.LevelAt(capnslog.DEBUG) {
diff --git a/pkg/operator/ceph/object/config.go b/pkg/operator/ceph/object/config.go
index 8ebe1237a995..31082d7bf676 100644
--- a/pkg/operator/ceph/object/config.go
+++ b/pkg/operator/ceph/object/config.go
@@ -110,7 +110,50 @@ func (c *clusterConfig) generateKeyring(rgwConfig *rgwConfig) (string, error) {
return keyring, s.CreateOrUpdate(rgwConfig.ResourceName, keyring)
}
+func mapKeystoneSecretToConfig(cfg map[string]string, secret *v1.Secret) (map[string]string, error) {
+
+ requiredKeys := []string{"OS_PROJECT_DOMAIN_NAME",
+ "OS_USER_DOMAIN_NAME",
+ "OS_PROJECT_DOMAIN_NAME",
+ "OS_USER_DOMAIN_NAME",
+ "OS_PROJECT_NAME",
+ "OS_USERNAME",
+ "OS_PASSWORD"}
+
+ data := make(map[string]string)
+ for key, value := range secret.Data {
+ data[key] = string(value[:])
+ }
+
+ for _, key := range requiredKeys {
+ if value, ok := data[key]; !ok || value == "" {
+ return nil, errors.New(fmt.Sprintf("Missing or empty %s", key))
+ }
+ }
+
+ if authType, ok := data["OS_AUTH_TYPE"]; ok && authType != "password" {
+ return nil, errors.New(fmt.Sprintf("OS_AUTHTYPE %s is not supported. Only OS_AUTH_TYPE password is supported!", authType))
+ }
+
+ if apiVersion, ok := data["OS_IDENTITY_API_VERSION"]; ok && apiVersion != "3" {
+ return nil, errors.New(fmt.Sprintf("OS_IDENTITY_API_VERSION %s is not supported! Only OS_IDENTITY_API_VERSION 3 is supported!", apiVersion))
+ }
+
+ if data["OS_PROJECT_DOMAIN_NAME"] != data["OS_USER_DOMAIN_NAME"] {
+ return nil, errors.New("The user domain name does not match the project domain name.")
+ }
+
+ cfg["rgw_keystone_api_version"] = data["OS_IDENTITY_API_VERSION"]
+ cfg["rgw_keystone_admin_domain"] = data["OS_PROJECT_DOMAIN_NAME"]
+ cfg["rgw_keystone_admin_project"] = data["OS_PROJECT_NAME"]
+ cfg["rgw_keystone_admin_user"] = data["OS_USERNAME"]
+ cfg["rgw_keystone_admin_password"] = data["OS_PASSWORD"]
+
+ return cfg, nil
+}
+
func (c *clusterConfig) setFlagsMonConfigStore(rgwConfig *rgwConfig) error {
+
monStore := cephconfig.GetMonStore(c.context, c.clusterInfo)
who := generateCephXUser(rgwConfig.ResourceName)
configOptions := make(map[string]string)
@@ -126,6 +169,60 @@ func (c *clusterConfig) setFlagsMonConfigStore(rgwConfig *rgwConfig) error {
configOptions["rgw_zone"] = rgwConfig.Zone
configOptions["rgw_zonegroup"] = rgwConfig.ZoneGroup
+ configOptions, err := configureKeystoneAuthentication(rgwConfig, configOptions)
+ if err != nil {
+ return err
+ }
+
+ s3disabled := false
+ if s3 := rgwConfig.Protocols.S3; s3 != nil {
+ if s3.Enabled != nil && !*s3.Enabled {
+ s3disabled = true
+ }
+
+ if s3.AuthUseKeystone != nil {
+ configOptions["rgw_s3_auth_use_keystone"] = fmt.Sprintf("%t", *s3.AuthUseKeystone)
+ }
+
+ }
+
+ if swift := rgwConfig.Protocols.Swift; swift != nil {
+
+ if swift.AccountInUrl != nil {
+ configOptions["rgw_swift_account_in_url"] = fmt.Sprintf("%t", *swift.AccountInUrl)
+ }
+
+ if swift.UrlPrefix != nil {
+ configOptions["rgw_swift_url_prefix"] = *swift.UrlPrefix
+
+ if configOptions["rgw_swift_url_prefix"] == "/" {
+ logger.Warning("Forcefully disabled S3 as the swift prefix is given as a slash /. Ignoring any S3 options (including Enabled=true)!")
+ // this will later on disable the s3 api using the rgw_enable_apis setting
+ s3disabled = true
+ }
+
+ }
+ if swift.VersioningEnabled != nil {
+ configOptions["rgw_swift_versioning_enabled"] = fmt.Sprintf("%t", *swift.VersioningEnabled)
+ }
+
+ }
+
+ if s3disabled {
+ // XXX: how to handle enabled APIs? We only configure s3 and
+ // swift in the resource, `admin` is required for the operator to
+ // work, `swift_auth` is required to access swift without keystone
+ // – not sure about the additional APIs
+ // https://docs.ceph.com/en/quincy/radosgw/config-ref/#confval-rgw_enable_apis
+ // see also https://docs.ceph.com/en/octopus/radosgw/config-ref/#swift-settings on disabling s3
+ // when using '/' as prefix
+
+ // Swift was enabled so far already by default, so perhaps better
+ // not change that if someone relies on it.
+
+ configOptions["rgw_enable_apis"] = "s3website, swift, swift_auth, admin, sts, iam, notifications"
+ }
+
for flag, val := range configOptions {
err := monStore.Set(who, flag, val)
if err != nil {
@@ -136,6 +233,54 @@ func (c *clusterConfig) setFlagsMonConfigStore(rgwConfig *rgwConfig) error {
return nil
}
+func configureKeystoneAuthentication(rgwConfig *rgwConfig, configOptions map[string]string) (map[string]string, error) {
+
+ keystone := rgwConfig.Auth.Keystone
+ if keystone == nil {
+ logger.Debug("Authentication with keystone is disabled")
+ return configOptions, nil
+ }
+
+ logger.Info("Configuring authentication with keystone")
+
+ configOptions["rgw_keystone_url"] = keystone.Url
+ configOptions["rgw_keystone_accepted_roles"] = strings.Join(keystone.AcceptedRoles, ",")
+ if keystone.ImplicitTenants != "" {
+ lc := strings.ToLower(string(keystone.ImplicitTenants))
+
+ // only four values are valid here (swift, s3, true and false)
+ //
+ // https://docs.ceph.com/en/latest/radosgw/keystone/#integrating-with-openstack-keystone
+ if lc != "true" &&
+ lc != "false" &&
+ lc != "swift" &&
+ lc != "s3" {
+
+ return nil, errors.New(fmt.Sprintf("ImplicitTenantSetting can only be 'swift', 's3', 'true' or 'false', not %q", string(keystone.ImplicitTenants)))
+
+ }
+
+ configOptions["rgw_keystone_implicit_tenants"] = lc
+
+ }
+
+ if keystone.TokenCacheSize != nil {
+ configOptions["rgw_keystone_token_cache_size"] = fmt.Sprintf("%d", *keystone.TokenCacheSize)
+ }
+
+ if rgwConfig.KeystoneSecret == nil {
+ return nil, errors.New("Cannot find keystone secret")
+ }
+
+ var err error
+ configOptions, err = mapKeystoneSecretToConfig(configOptions, rgwConfig.KeystoneSecret)
+ if err != nil {
+ return nil, errors.Wrap(err, fmt.Sprintf("error mapping keystone secret %s to config", rgwConfig.KeystoneSecret.Name))
+ }
+
+ return configOptions, nil
+}
+
func (c *clusterConfig) deleteFlagsMonConfigStore(rgwName string) error {
monStore := cephconfig.GetMonStore(c.context, c.clusterInfo)
who := generateCephXUser(rgwName)
diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go
index 95880679abc1..651978ec9204 100644
--- a/pkg/operator/ceph/object/controller.go
+++ b/pkg/operator/ceph/object/controller.go
@@ -405,7 +405,7 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c
}
}
- if err := UpdateEndpoint(objContext, cephObjectStore); err != nil {
+ if err := UpdateEndpointForAdminOps(objContext, cephObjectStore); err != nil {
return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to set endpoint", err)
}
} else {
@@ -437,7 +437,7 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c
return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to reconcile service", err)
}
- if err := UpdateEndpoint(objContext, cephObjectStore); err != nil {
+ if err := UpdateEndpointForAdminOps(objContext, cephObjectStore); err != nil {
return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to set endpoint", err)
}
@@ -459,8 +459,17 @@ func (r *ReconcileCephObjectStore) reconcileCreateObjectStore(cephObjectStore *c
return r.setFailedStatus(k8sutil.ObservedGenerationNotAvailable, namespacedName, "failed to configure multisite for object store", err)
}
- // Create or Update Store
- err = cfg.createOrUpdateStore(realmName, zoneGroupName, zoneName)
+ // Retrieve the keystone secret if specified
+ var keystoneSecret *corev1.Secret
+ if ks := cephObjectStore.Spec.Auth.Keystone; ks != nil {
+ keystoneSecret, err = objContext.Context.Clientset.CoreV1().Secrets(objContext.clusterInfo.Namespace).Get(objContext.clusterInfo.Context, ks.ServiceUserSecretName, metav1.GetOptions{})
+ if err != nil {
+ return reconcile.Result{}, errors.Wrapf(err, "failed to get the keystone credential secret")
+ }
+ }
+
+ // Create or Update store
+ err = cfg.createOrUpdateStore(realmName, zoneGroupName, zoneName, keystoneSecret)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to create object store %q", cephObjectStore.Name)
}
diff --git a/pkg/operator/ceph/object/rgw.go b/pkg/operator/ceph/object/rgw.go
index f41ecff1f012..558100bf42ea 100644
--- a/pkg/operator/ceph/object/rgw.go
+++ b/pkg/operator/ceph/object/rgw.go
@@ -19,7 +19,6 @@ package object
import (
"fmt"
- "math/rand"
"net/http"
"os"
"reflect"
@@ -62,6 +61,10 @@ type rgwConfig struct {
Realm string
ZoneGroup string
Zone string
+
+ Auth cephv1.AuthSpec
+ KeystoneSecret *v1.Secret
+ Protocols cephv1.ProtocolSpec
}
var updateDeploymentAndWait = mon.UpdateCephDeploymentAndWait
@@ -70,10 +73,10 @@ var (
insecureSkipVerify = "insecureSkipVerify"
)
-func (c *clusterConfig) createOrUpdateStore(realmName, zoneGroupName, zoneName string) error {
+func (c *clusterConfig) createOrUpdateStore(realmName, zoneGroupName, zoneName string, keystoneSecret *v1.Secret) error {
logger.Infof("creating object store %q in namespace %q", c.store.Name, c.store.Namespace)
- if err := c.startRGWPods(realmName, zoneGroupName, zoneName); err != nil {
+ if err := c.startRGWPods(realmName, zoneGroupName, zoneName, keystoneSecret); err != nil {
return errors.Wrap(err, "failed to start rgw pods")
}
@@ -95,7 +98,7 @@ func (c *clusterConfig) createOrUpdateStore(realmName, zoneGroupName, zoneName s
return nil
}
-func (c *clusterConfig) startRGWPods(realmName, zoneGroupName, zoneName string) error {
+func (c *clusterConfig) startRGWPods(realmName, zoneGroupName, zoneName string, keystoneSecret *v1.Secret) error {
// backward compatibility, triggered during updates
if c.store.Spec.Gateway.Instances < 1 {
// Set the minimum of at least one instance
@@ -127,11 +130,14 @@ func (c *clusterConfig) startRGWPods(realmName, zoneGroupName, zoneName string)
resourceName := fmt.Sprintf("%s-%s-%s", AppName, c.store.Name, daemonLetterID)
rgwConfig := &rgwConfig{
- ResourceName: resourceName,
- DaemonID: daemonName,
- Realm: realmName,
- ZoneGroup: zoneGroupName,
- Zone: zoneName,
+ ResourceName: resourceName,
+ DaemonID: daemonName,
+ Realm: realmName,
+ ZoneGroup: zoneGroupName,
+ Zone: zoneName,
+ Auth: c.store.Spec.Auth,
+ Protocols: c.store.Spec.Protocols,
+ KeystoneSecret: keystoneSecret,
}
// We set the owner reference of the Secret to the Object controller instead of the replicaset
@@ -331,39 +337,11 @@ func EmptyPool(pool cephv1.PoolSpec) bool {
return reflect.DeepEqual(pool, cephv1.PoolSpec{})
}
-// GetDomainName build the dns name to reach out the service endpoint
-func GetDomainName(s *cephv1.CephObjectStore) string {
- return getDomainName(s, true)
-}
-
func GetStableDomainName(s *cephv1.CephObjectStore) string {
- return getDomainName(s, false)
-}
-
-func getDomainName(s *cephv1.CephObjectStore, returnRandomDomainIfMultiple bool) string {
- endpoints := []string{}
- if s.Spec.IsExternal() {
- // if the store is external, pick a random endpoint to use. if the endpoint is down, this
- // reconcile may fail, but a future reconcile will eventually pick a different endpoint to try
- for _, e := range s.Spec.Gateway.ExternalRgwEndpoints {
- endpoints = append(endpoints, e.String())
- }
- } else if s.Spec.Hosting != nil && len(s.Spec.Hosting.DNSNames) > 0 {
- // if the store is internal and has DNS names, pick a random DNS name to use
- endpoints = s.Spec.Hosting.DNSNames
- } else {
- return domainNameOfService(s)
- }
-
- idx := 0
- if returnRandomDomainIfMultiple {
- idx = rand.Intn(len(endpoints)) //nolint:gosec // G404: cryptographically weak RNG is fine here
+ if !s.Spec.IsExternal() {
+ return s.GetServiceDomainName()
}
- return endpoints[idx]
-}
-
-func domainNameOfService(s *cephv1.CephObjectStore) string {
- return fmt.Sprintf("%s-%s.%s.%s", AppName, s.Name, s.Namespace, svcDNSSuffix)
+ return s.Spec.Gateway.ExternalRgwEndpoints[0].String()
}
func getAllDomainNames(s *cephv1.CephObjectStore) []string {
@@ -376,7 +354,9 @@ func getAllDomainNames(s *cephv1.CephObjectStore) []string {
return domains
}
- return []string{domainNameOfService(s)}
+ // do not return hosting.dnsNames in this list because Rook has no way of knowing for sure how
+ // they can be used. some might be TLS-only or non-TLS, or inaccessible from k8s
+ return []string{s.GetServiceDomainName()}
}
func getAllDNSEndpoints(s *cephv1.CephObjectStore, port int32, secure bool) []string {
diff --git a/pkg/operator/ceph/object/rgw_test.go b/pkg/operator/ceph/object/rgw_test.go
index 2723ca5a3a5b..2d0ddaae1a0a 100644
--- a/pkg/operator/ceph/object/rgw_test.go
+++ b/pkg/operator/ceph/object/rgw_test.go
@@ -18,6 +18,7 @@ package object
import (
"context"
+ "strings"
"testing"
"time"
@@ -82,7 +83,7 @@ func TestStartRGW(t *testing.T) {
t.Run("Deployment is created", func(t *testing.T) {
store.Spec.Gateway.Instances = 1
- err := c.startRGWPods(store.Name, store.Name, store.Name)
+ err := c.startRGWPods(store.Name, store.Name, store.Name, nil)
assert.Nil(t, err)
validateStart(ctx, t, c, clientset)
@@ -95,7 +96,7 @@ func TestStartRGW(t *testing.T) {
// Purge store of configurations applied to gateways
appliedRgwConfigurations = make(map[string]string)
- err := c.startRGWPods(store.Name, store.Name, store.Name)
+ err := c.startRGWPods(store.Name, store.Name, store.Name, nil)
assert.Nil(t, err)
assert.Contains(t, appliedRgwConfigurations, "rgw_run_sync_thread")
@@ -109,7 +110,7 @@ func TestStartRGW(t *testing.T) {
// Purge store of configurations applied to gateways
appliedRgwConfigurations = make(map[string]string)
- err := c.startRGWPods(store.Name, store.Name, store.Name)
+ err := c.startRGWPods(store.Name, store.Name, store.Name, nil)
assert.Nil(t, err)
assert.Contains(t, appliedRgwConfigurations, "rgw_run_sync_thread")
@@ -139,9 +140,20 @@ func TestCreateObjectStore(t *testing.T) {
}
return "", nil
}
+
+ timeoutCommand := func(timeout time.Duration, command string, args ...string) (string, error) {
+ logger.Infof("Command: %s %v", command, args)
+ for _, arg := range args {
+ assert.False(t, strings.Contains(arg, "swift"))
+ assert.False(t, strings.Contains(arg, "keystone"))
+ }
+ return "", nil
+ }
+
executor := &exectest.MockExecutor{
MockExecuteCommandWithCombinedOutput: commandWithOutputFunc,
MockExecuteCommandWithOutput: commandWithOutputFunc,
+ MockExecuteCommandWithTimeout: timeoutCommand,
}
store := simpleStore()
@@ -157,7 +169,7 @@ func TestCreateObjectStore(t *testing.T) {
r := &ReconcileCephObjectStore{client: cl, scheme: s}
ownerInfo := client.NewMinimumOwnerInfoWithOwnerRef()
c := &clusterConfig{context, info, store, "1.2.3.4", &cephv1.ClusterSpec{}, ownerInfo, data, r.client}
- err := c.createOrUpdateStore(store.Name, store.Name, store.Name)
+ err := c.createOrUpdateStore(store.Name, store.Name, store.Name, nil)
assert.Nil(t, err)
}
@@ -172,6 +184,57 @@ func simpleStore() *cephv1.CephObjectStore {
}
}
+func TestCreateObjectStoreWithKeystoneAndS3(t *testing.T) {
+ commandWithOutputFunc := func(command string, args ...string) (string, error) {
+ logger.Infof("Command: %s %v", command, args)
+ if command == "ceph" {
+ if args[1] == "erasure-code-profile" {
+ return `{"k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}`, nil
+ }
+ if args[0] == "auth" && args[1] == "get-or-create-key" {
+ return `{"key":"mykey"}`, nil
+ }
+ } else {
+ return `{"realms": []}`, nil
+ }
+ return "", nil
+ }
+ executor := &exectest.MockExecutor{
+ MockExecuteCommandWithCombinedOutput: commandWithOutputFunc,
+ MockExecuteCommandWithOutput: commandWithOutputFunc,
+ }
+
+ store := simpleStoreWithKeystoneAndS3()
+ clientset := test.New(t, 3)
+ context := &clusterd.Context{Executor: executor, Clientset: clientset}
+ info := clienttest.CreateTestClusterInfo(1)
+ data := config.NewStatelessDaemonDataPathMap(config.RgwType, "my-fs", "rook-ceph", "/var/lib/rook/")
+
+ // create the pools
+ s := scheme.Scheme
+ object := []runtime.Object{&cephv1.CephObjectStore{}}
+ cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+ r := &ReconcileCephObjectStore{client: cl, scheme: s}
+ ownerInfo := client.NewMinimumOwnerInfoWithOwnerRef()
+ c := &clusterConfig{context, info, store, "1.2.3.4", &cephv1.ClusterSpec{}, ownerInfo, data, r.client}
+ err := c.createOrUpdateStore(store.Name, store.Name, store.Name, nil)
+ assert.Nil(t, err)
+}
+
+func simpleStoreWithKeystoneAndS3() *cephv1.CephObjectStore {
+ authUseKeystone := true
+ return &cephv1.CephObjectStore{
+ ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "mycluster"},
+ Spec: cephv1.ObjectStoreSpec{
+ MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}},
+ DataPool: cephv1.PoolSpec{ErasureCoded: cephv1.ErasureCodedSpec{CodingChunks: 1, DataChunks: 2}},
+ Gateway: cephv1.GatewaySpec{Port: 123},
+ Auth: cephv1.AuthSpec{Keystone: &cephv1.KeystoneSpec{Url: "testurl", ServiceUserSecretName: "testname", AcceptedRoles: []string{"testrole"}}},
+ Protocols: cephv1.ProtocolSpec{S3: &cephv1.S3Spec{AuthUseKeystone: &authUseKeystone}},
+ },
+ }
+}
+
func TestGenerateSecretName(t *testing.T) {
cl := fake.NewClientBuilder().Build()
@@ -202,14 +265,7 @@ func TestEmptyPoolSpec(t *testing.T) {
}
func TestBuildDomainNameAndEndpoint(t *testing.T) {
- s := &cephv1.CephObjectStore{
- ObjectMeta: metav1.ObjectMeta{
- Name: "my-store",
- Namespace: "rook-ceph",
- },
- }
- dns := GetDomainName(s)
- assert.Equal(t, "rook-ceph-rgw-my-store.rook-ceph.svc", dns)
+ dns := "rook-ceph-rgw-my-store.rook-ceph.svc"
// non-secure endpoint
var port int32 = 80
diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go
index 3213cc435c01..701042d950bb 100644
--- a/pkg/operator/ceph/object/spec.go
+++ b/pkg/operator/ceph/object/spec.go
@@ -922,16 +922,26 @@ func renderProbe(cfg rgwProbeConfig) (string, error) {
}
func (c *clusterConfig) addDNSNamesToRGWServer() (string, error) {
- if (c.store.Spec.Hosting == nil) || len(c.store.Spec.Hosting.DNSNames) <= 0 {
+ if c.store.Spec.Hosting == nil {
+ return "", nil
+ }
+ if !c.store.AdvertiseEndpointIsSet() && len(c.store.Spec.Hosting.DNSNames) == 0 {
return "", nil
}
if !c.clusterInfo.CephVersion.IsAtLeastReef() {
return "", errors.New("rgw dns names are supported from ceph v18 onwards")
}
- // add default RGW service name to dns names
- dnsNames := c.store.Spec.Hosting.DNSNames
- dnsNames = append(dnsNames, domainNameOfService(c.store))
+ dnsNames := []string{}
+
+ if c.store.AdvertiseEndpointIsSet() {
+ dnsNames = append(dnsNames, c.store.Spec.Hosting.AdvertiseEndpoint.DnsName)
+ }
+
+ dnsNames = append(dnsNames, c.store.Spec.Hosting.DNSNames...)
+
+ // add default RGW service domain name to ensure RGW doesn't reject it
+ dnsNames = append(dnsNames, c.store.GetServiceDomainName())
// add custom endpoints from zone spec if exists
if c.store.Spec.Zone.Name != "" {
diff --git a/pkg/operator/ceph/object/spec_test.go b/pkg/operator/ceph/object/spec_test.go
index 5665fc0902f2..e13df82650a1 100644
--- a/pkg/operator/ceph/object/spec_test.go
+++ b/pkg/operator/ceph/object/spec_test.go
@@ -914,6 +914,9 @@ func TestAddDNSNamesToRGWPodSpec(t *testing.T) {
DataPathMap: data,
}
}
+
+ cephV18 := cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}
+
tests := []struct {
name string
dnsNames []string
@@ -923,18 +926,18 @@ func TestAddDNSNamesToRGWPodSpec(t *testing.T) {
CustomEndpoints []string
wantErr bool
}{
- {"no dns names ceph v18", []string{}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "", []string{}, false},
- {"no dns names with zone ceph v18", []string{}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "myzone", []string{}, false},
- {"no dns names with zone and custom endpoints ceph v18", []string{}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80"}, false},
- {"one dns name ceph v18", []string{"my.dns.name"}, "--rgw-dns-name=my.dns.name,rook-ceph-rgw-default.mycluster.svc", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "", []string{}, false},
- {"multiple dns names ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "", []string{}, false},
- {"duplicate dns names ceph v18", []string{"my.dns.name1", "my.dns.name2", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "", []string{}, false},
- {"invalid dns name ceph v18", []string{"!my.invalid-dns.com"}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "", []string{}, true},
- {"mixed invalid and valid dns names ceph v18", []string{"my.dns.name", "!my.invalid-dns.name"}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "", []string{}, true},
- {"dns name with zone without custom endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "myzone", []string{}, false},
- {"dns name with zone with custom endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc,my.custom.endpoint1,my.custom.endpoint2", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80"}, false},
- {"dns name with zone with custom invalid endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "myzone", []string{"http://my.custom.endpoint:80", "http://!my.invalid-custom.endpoint:80"}, true},
- {"dns name with zone with mixed invalid and valid dnsnames/custom endpoint ceph v18", []string{"my.dns.name", "!my.dns.name"}, "", cephver.CephVersion{Major: 18, Minor: 0, Extra: 0}, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80:80"}, true},
+ {"no dns names ceph v18", []string{}, "", cephV18, "", []string{}, false},
+ {"no dns names with zone ceph v18", []string{}, "", cephV18, "myzone", []string{}, false},
+ {"no dns names with zone and custom endpoints ceph v18", []string{}, "", cephV18, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80"}, false},
+ {"one dns name ceph v18", []string{"my.dns.name"}, "--rgw-dns-name=my.dns.name,rook-ceph-rgw-default.mycluster.svc", cephV18, "", []string{}, false},
+ {"multiple dns names ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc", cephV18, "", []string{}, false},
+ {"duplicate dns names ceph v18", []string{"my.dns.name1", "my.dns.name2", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc", cephV18, "", []string{}, false},
+ {"invalid dns name ceph v18", []string{"!my.invalid-dns.com"}, "", cephV18, "", []string{}, true},
+ {"mixed invalid and valid dns names ceph v18", []string{"my.dns.name", "!my.invalid-dns.name"}, "", cephV18, "", []string{}, true},
+ {"dns name with zone without custom endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc", cephV18, "myzone", []string{}, false},
+ {"dns name with zone with custom endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "--rgw-dns-name=my.dns.name1,my.dns.name2,rook-ceph-rgw-default.mycluster.svc,my.custom.endpoint1,my.custom.endpoint2", cephV18, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80"}, false},
+ {"dns name with zone with custom invalid endpoints ceph v18", []string{"my.dns.name1", "my.dns.name2"}, "", cephV18, "myzone", []string{"http://my.custom.endpoint:80", "http://!my.invalid-custom.endpoint:80"}, true},
+ {"dns name with zone with mixed invalid and valid dnsnames/custom endpoint ceph v18", []string{"my.dns.name", "!my.dns.name"}, "", cephV18, "myzone", []string{"http://my.custom.endpoint1:80", "http://my.custom.endpoint2:80:80"}, true},
{"no dns names ceph v17", []string{}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, false},
{"one dns name ceph v17", []string{"my.dns.name"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
{"multiple dns names ceph v17", []string{"my.dns.name1", "my.dns.name2"}, "", cephver.CephVersion{Major: 17, Minor: 0, Extra: 0}, "", []string{}, true},
@@ -965,9 +968,95 @@ func TestAddDNSNamesToRGWPodSpec(t *testing.T) {
assert.NoError(t, err)
}
assert.Equal(t, tt.expectedDNSArg, res)
-
})
}
+
+ t.Run("advertiseEndpoint http, no dnsNames", func(t *testing.T) {
+ c := setupTest("", cephV18, []string{}, []string{})
+ c.store.Spec.Hosting = &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 80,
+ },
+ }
+ res, err := c.addDNSNamesToRGWServer()
+ assert.NoError(t, err)
+ assert.Equal(t, "--rgw-dns-name=my.endpoint.com,rook-ceph-rgw-default.mycluster.svc", res)
+ })
+
+ t.Run("advertiseEndpoint https, no dnsNames", func(t *testing.T) {
+ c := setupTest("", cephV18, []string{}, []string{})
+ c.store.Spec.Hosting = &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 443,
+ UseTls: true,
+ },
+ }
+ res, err := c.addDNSNamesToRGWServer()
+ assert.NoError(t, err)
+ assert.Equal(t, "--rgw-dns-name=my.endpoint.com,rook-ceph-rgw-default.mycluster.svc", res)
+ })
+
+ t.Run("advertiseEndpoint is svc", func(t *testing.T) {
+ c := setupTest("", cephV18, []string{}, []string{})
+ c.store.Spec.Hosting = &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "rook-ceph-rgw-default.mycluster.svc",
+ Port: 443,
+ UseTls: true,
+ },
+ }
+ res, err := c.addDNSNamesToRGWServer()
+ assert.NoError(t, err)
+ // ensures duplicates are removed
+ assert.Equal(t, "--rgw-dns-name=rook-ceph-rgw-default.mycluster.svc", res)
+ })
+
+ t.Run("advertiseEndpoint https, no dnsNames, with zone custom endpoint", func(t *testing.T) {
+ c := setupTest("my-zone", cephV18, []string{}, []string{"multisite.endpoint.com"})
+ c.store.Spec.Hosting = &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 443,
+ UseTls: true,
+ },
+ }
+ res, err := c.addDNSNamesToRGWServer()
+ assert.NoError(t, err)
+ assert.Equal(t, "--rgw-dns-name=my.endpoint.com,rook-ceph-rgw-default.mycluster.svc,multisite.endpoint.com", res)
+ })
+
+ t.Run("advertiseEndpoint https, with dnsNames, with zone custom endpoint", func(t *testing.T) {
+ c := setupTest("my-zone", cephV18, []string{}, []string{"multisite.endpoint.com"})
+ c.store.Spec.Hosting = &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 443,
+ UseTls: true,
+ },
+ DNSNames: []string{"extra.endpoint.com", "extra.endpoint.net"},
+ }
+ res, err := c.addDNSNamesToRGWServer()
+ assert.NoError(t, err)
+ assert.Equal(t, "--rgw-dns-name=my.endpoint.com,extra.endpoint.com,extra.endpoint.net,rook-ceph-rgw-default.mycluster.svc,multisite.endpoint.com", res)
+ })
+
+ t.Run("advertiseEndpoint https, with dnsNames, with zone custom endpoint, duplicates", func(t *testing.T) {
+ c := setupTest("my-zone", cephV18, []string{}, []string{"extra.endpoint.com"})
+ c.store.Spec.Hosting = &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 443,
+ UseTls: true,
+ },
+ DNSNames: []string{"my.endpoint.com", "extra.endpoint.com"},
+ }
+ res, err := c.addDNSNamesToRGWServer()
+ assert.NoError(t, err)
+ t.Log(res)
+ assert.Equal(t, "--rgw-dns-name=my.endpoint.com,extra.endpoint.com,rook-ceph-rgw-default.mycluster.svc", res)
+ })
}
func TestGetHostnameFromEndpoint(t *testing.T) {
diff --git a/pkg/operator/ceph/object/status.go b/pkg/operator/ceph/object/status.go
index d64f9ad9c00b..d6d1c0e0f67e 100644
--- a/pkg/operator/ceph/object/status.go
+++ b/pkg/operator/ceph/object/status.go
@@ -18,6 +18,7 @@ package object
import (
"context"
+ "fmt"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
@@ -90,15 +91,28 @@ func updateStatus(ctx context.Context, observedGeneration int64, client client.C
}
func buildStatusInfo(cephObjectStore *cephv1.CephObjectStore) map[string]string {
+ nsName := fmt.Sprintf("%s/%s", cephObjectStore.Namespace, cephObjectStore.Name)
+
m := make(map[string]string)
- if cephObjectStore.Spec.Gateway.SecurePort != 0 && cephObjectStore.Spec.Gateway.Port != 0 {
- m["secureEndpoint"] = BuildDNSEndpoint(GetStableDomainName(cephObjectStore), cephObjectStore.Spec.Gateway.SecurePort, true)
+ advertiseEndpoint, err := cephObjectStore.GetAdvertiseEndpointUrl()
+ if err != nil {
+ // lots of validation happens before this point, so this should be nearly impossible
+ logger.Errorf("failed to get advertise endpoint for CephObjectStore %q to record on status; continuing without this. %v", nsName, err)
+ }
+
+ if cephObjectStore.AdvertiseEndpointIsSet() {
+ // if the advertise endpoint is explicitly set, it takes precedence as the only endpoint
+ m["endpoint"] = advertiseEndpoint
+ return m
+ }
+
+ if cephObjectStore.Spec.Gateway.Port != 0 && cephObjectStore.Spec.Gateway.SecurePort != 0 {
+ // by definition, advertiseEndpoint should prefer HTTPS, so the inverse arrangement doesn't apply
+ m["secureEndpoint"] = advertiseEndpoint
m["endpoint"] = BuildDNSEndpoint(GetStableDomainName(cephObjectStore), cephObjectStore.Spec.Gateway.Port, false)
- } else if cephObjectStore.Spec.Gateway.SecurePort != 0 {
- m["endpoint"] = BuildDNSEndpoint(GetStableDomainName(cephObjectStore), cephObjectStore.Spec.Gateway.SecurePort, true)
} else {
- m["endpoint"] = BuildDNSEndpoint(GetStableDomainName(cephObjectStore), cephObjectStore.Spec.Gateway.Port, false)
+ m["endpoint"] = advertiseEndpoint
}
return m
diff --git a/pkg/operator/ceph/object/status_test.go b/pkg/operator/ceph/object/status_test.go
index be8b743c977a..d6195a743ee2 100644
--- a/pkg/operator/ceph/object/status_test.go
+++ b/pkg/operator/ceph/object/status_test.go
@@ -25,36 +25,140 @@ import (
)
func TestBuildStatusInfo(t *testing.T) {
- // Port enabled and SecurePort disabled
- cephObjectStore := &cephv1.CephObjectStore{
+ baseStore := &cephv1.CephObjectStore{
ObjectMeta: metav1.ObjectMeta{
Name: "my-store",
Namespace: "rook-ceph",
},
}
- cephObjectStore.Spec.Gateway.Port = 80
- statusInfo := buildStatusInfo(cephObjectStore)
+ // Port enabled and SecurePort disabled
+ s := baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 80
+ statusInfo := buildStatusInfo(s)
+
assert.NotEmpty(t, statusInfo["endpoint"])
assert.Empty(t, statusInfo["secureEndpoint"])
assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", statusInfo["endpoint"])
// SecurePort enabled and Port disabled
- cephObjectStore.Spec.Gateway.Port = 0
- cephObjectStore.Spec.Gateway.SecurePort = 443
+ s = baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 0
+ s.Spec.Gateway.SecurePort = 443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
- statusInfo = buildStatusInfo(cephObjectStore)
+ statusInfo = buildStatusInfo(s)
assert.NotEmpty(t, statusInfo["endpoint"])
assert.Empty(t, statusInfo["secureEndpoint"])
assert.Equal(t, "https://rook-ceph-rgw-my-store.rook-ceph.svc:443", statusInfo["endpoint"])
// Both Port and SecurePort enabled
- cephObjectStore.Spec.Gateway.Port = 80
- cephObjectStore.Spec.Gateway.SecurePort = 443
+ s = baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 80
+ s.Spec.Gateway.SecurePort = 443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
- statusInfo = buildStatusInfo(cephObjectStore)
+ statusInfo = buildStatusInfo(s)
assert.NotEmpty(t, statusInfo["endpoint"])
assert.NotEmpty(t, statusInfo["secureEndpoint"])
assert.Equal(t, "http://rook-ceph-rgw-my-store.rook-ceph.svc:80", statusInfo["endpoint"])
assert.Equal(t, "https://rook-ceph-rgw-my-store.rook-ceph.svc:443", statusInfo["secureEndpoint"])
+
+ t.Run("advertiseEndpoint http", func(t *testing.T) {
+ baseStore := &cephv1.CephObjectStore{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-store",
+ Namespace: "rook-ceph",
+ },
+ Spec: cephv1.ObjectStoreSpec{
+ Hosting: &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 80,
+ UseTls: false,
+ },
+ },
+ },
+ }
+
+ // Port enabled and SecurePort disabled
+ s := baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 80
+ statusInfo := buildStatusInfo(s)
+
+ assert.NotEmpty(t, statusInfo["endpoint"])
+ assert.Empty(t, statusInfo["secureEndpoint"])
+ assert.Equal(t, "http://my.endpoint.com:80", statusInfo["endpoint"])
+
+ // SecurePort enabled and Port disabled
+ s = baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 0
+ s.Spec.Gateway.SecurePort = 443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+
+ statusInfo = buildStatusInfo(s)
+ assert.NotEmpty(t, statusInfo["endpoint"])
+ assert.Empty(t, statusInfo["secureEndpoint"])
+ assert.Equal(t, "http://my.endpoint.com:80", statusInfo["endpoint"])
+
+ // Both Port and SecurePort enabled
+ s = baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 80
+ s.Spec.Gateway.SecurePort = 443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+
+ statusInfo = buildStatusInfo(s)
+ assert.NotEmpty(t, statusInfo["endpoint"])
+ assert.Empty(t, statusInfo["secureEndpoint"])
+ assert.Equal(t, "http://my.endpoint.com:80", statusInfo["endpoint"])
+ })
+
+ t.Run("advertiseEndpoint https", func(t *testing.T) {
+ baseStore := &cephv1.CephObjectStore{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-store",
+ Namespace: "rook-ceph",
+ },
+ Spec: cephv1.ObjectStoreSpec{
+ Hosting: &cephv1.ObjectStoreHostingSpec{
+ AdvertiseEndpoint: &cephv1.ObjectEndpointSpec{
+ DnsName: "my.endpoint.com",
+ Port: 443,
+ UseTls: true,
+ },
+ },
+ },
+ }
+
+ // Port enabled and SecurePort disabled
+ s := baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 80
+ statusInfo := buildStatusInfo(s)
+
+ assert.NotEmpty(t, statusInfo["endpoint"])
+ assert.Empty(t, statusInfo["secureEndpoint"])
+ assert.Equal(t, "https://my.endpoint.com:443", statusInfo["endpoint"])
+
+ // SecurePort enabled and Port disabled
+ s = baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 0
+ s.Spec.Gateway.SecurePort = 443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+
+ statusInfo = buildStatusInfo(s)
+ assert.NotEmpty(t, statusInfo["endpoint"])
+ assert.Empty(t, statusInfo["secureEndpoint"])
+ assert.Equal(t, "https://my.endpoint.com:443", statusInfo["endpoint"])
+
+ // Both Port and SecurePort enabled
+ s = baseStore.DeepCopy()
+ s.Spec.Gateway.Port = 80
+ s.Spec.Gateway.SecurePort = 443
+ s.Spec.Gateway.SSLCertificateRef = "my-cert"
+
+ statusInfo = buildStatusInfo(s)
+ assert.NotEmpty(t, statusInfo["endpoint"])
+ assert.Empty(t, statusInfo["secureEndpoint"])
+ assert.Equal(t, "https://my.endpoint.com:443", statusInfo["endpoint"])
+ })
}
diff --git a/pkg/operator/ceph/object/user/controller.go b/pkg/operator/ceph/object/user/controller.go
index b55112f2e8e0..2ff73e3d85b2 100644
--- a/pkg/operator/ceph/object/user/controller.go
+++ b/pkg/operator/ceph/object/user/controller.go
@@ -67,15 +67,16 @@ var controllerTypeMeta = metav1.TypeMeta{
// ReconcileObjectStoreUser reconciles a ObjectStoreUser object
type ReconcileObjectStoreUser struct {
- client client.Client
- scheme *runtime.Scheme
- context *clusterd.Context
- objContext *object.AdminOpsContext
- userConfig *admin.User
- cephClusterSpec *cephv1.ClusterSpec
- clusterInfo *cephclient.ClusterInfo
- opManagerContext context.Context
- recorder record.EventRecorder
+ client client.Client
+ scheme *runtime.Scheme
+ context *clusterd.Context
+ objContext *object.AdminOpsContext
+ advertiseEndpoint string
+ userConfig *admin.User
+ cephClusterSpec *cephv1.ClusterSpec
+ clusterInfo *cephclient.ClusterInfo
+ opManagerContext context.Context
+ recorder record.EventRecorder
}
// Add creates a new CephObjectStoreUser Controller and adds it to the Manager. The Manager will set fields on the Controller
@@ -264,7 +265,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci
}
tlsSecretName := store.Spec.Gateway.SSLCertificateRef
- reconcileResponse, err = object.ReconcileCephUserSecret(r.opManagerContext, r.client, r.scheme, cephObjectStoreUser, r.userConfig, r.objContext.Endpoint, cephObjectStoreUser.Namespace, cephObjectStoreUser.Spec.Store, tlsSecretName)
+ reconcileResponse, err = object.ReconcileCephUserSecret(r.opManagerContext, r.client, r.scheme, cephObjectStoreUser, r.userConfig, r.advertiseEndpoint, cephObjectStoreUser.Namespace, cephObjectStoreUser.Spec.Store, tlsSecretName)
if err != nil {
r.updateStatus(k8sutil.ObservedGenerationNotAvailable, request.NamespacedName, k8sutil.ReconcileFailedStatus)
return reconcileResponse, *cephObjectStoreUser, err
@@ -364,7 +365,7 @@ func (r *ReconcileObjectStoreUser) createOrUpdateCephUser(u *cephv1.CephObjectSt
}
// Set access and secret key
- if r.userConfig.Keys == nil {
+ if len(r.userConfig.Keys) == 0 {
r.userConfig.Keys = make([]admin.UserKeySpec, 1)
}
r.userConfig.Keys[0].AccessKey = user.Keys[0].AccessKey
@@ -395,6 +396,12 @@ func (r *ReconcileObjectStoreUser) initializeObjectStoreContext(u *cephv1.CephOb
}
}
+ advertiseEndpoint, err := store.GetAdvertiseEndpointUrl()
+ if err != nil {
+ return errors.Wrapf(err, "failed to get CephObjectStore %q advertise endpoint for object store user", u.Spec.Store)
+ }
+ r.advertiseEndpoint = advertiseEndpoint
+
objContext, err := object.NewMultisiteContext(r.context, r.clusterInfo, store)
if err != nil {
return errors.Wrapf(err, "Multisite failed to set on object context for object store user")
@@ -439,6 +446,7 @@ func generateUserConfig(user *cephv1.CephObjectStoreUser) admin.User {
userConfig := admin.User{
ID: user.Name,
DisplayName: displayName,
+ Keys: make([]admin.UserKeySpec, 0),
}
defaultMaxBuckets := 1000
diff --git a/pkg/operator/ceph/pool/radosnamespace/controller.go b/pkg/operator/ceph/pool/radosnamespace/controller.go
index 7811818f8917..cd8e6fe7d5cc 100644
--- a/pkg/operator/ceph/pool/radosnamespace/controller.go
+++ b/pkg/operator/ceph/pool/radosnamespace/controller.go
@@ -24,6 +24,7 @@ import (
"strings"
"time"
+ csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
@@ -104,6 +105,11 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}
+ err = csiopv1a1.AddToScheme(mgr.GetScheme())
+ if err != nil {
+ return err
+ }
+
return nil
}
@@ -256,6 +262,14 @@ func (r *ReconcileCephBlockPoolRadosNamespace) reconcile(request reconcile.Reque
}
r.updateStatus(r.client, namespacedName, cephv1.ConditionReady)
+
+ if csi.EnableCSIOperator() {
+ err = csi.CreateUpdateClientProfileRadosNamespace(r.clusterInfo.Context, r.client, r.clusterInfo, cephBlockPoolRadosNamespacedName, buildClusterID(cephBlockPoolRadosNamespace), cephCluster.Name)
+ if err != nil {
+ return reconcile.Result{}, errors.Wrap(err, "failed to create ceph csi-op config CR for RadosNamespace")
+ }
+ }
+
// Return and do not requeue
logger.Debugf("done reconciling cephBlockPoolRadosNamespace %q", namespacedName)
return reconcile.Result{}, nil
diff --git a/pkg/operator/k8sutil/customresource.go b/pkg/operator/k8sutil/customresource.go
index 1298a7fc0fed..487d088b685c 100644
--- a/pkg/operator/k8sutil/customresource.go
+++ b/pkg/operator/k8sutil/customresource.go
@@ -54,19 +54,7 @@ func WatchCR(resource CustomResource, namespace string, handlers cache.ResourceE
resource.Plural,
namespace,
fields.Everything())
- _, controller := cache.NewInformer(
- source,
-
- // The object type.
- objType,
-
- // resyncPeriod
- // Every resyncPeriod, all resources in the cache will retrigger events.
- // Set to 0 to disable the resync.
- 0,
-
- // Your custom resource event handlers.
- handlers)
+ _, controller := cache.NewInformerWithOptions(cache.InformerOptions{ListerWatcher: source, ObjectType: objType, ResyncPeriod: 0, Handler: handlers})
go controller.Run(done)
<-done
diff --git a/tests/framework/clients/object.go b/tests/framework/clients/object.go
index a5df6584702d..a23703fdc012 100644
--- a/tests/framework/clients/object.go
+++ b/tests/framework/clients/object.go
@@ -18,7 +18,6 @@ package clients
import (
"fmt"
-
"github.com/coreos/pkg/capnslog"
"github.com/rook/rook/tests/framework/installer"
"github.com/rook/rook/tests/framework/utils"
@@ -40,10 +39,11 @@ func CreateObjectOperation(k8sh *utils.K8sHelper, manifests installer.CephManife
}
// ObjectCreate Function to create a object store in rook
-func (o *ObjectOperation) Create(namespace, storeName string, replicaCount int32, tlsEnable bool) error {
+func (o *ObjectOperation) Create(namespace, storeName string, replicaCount int32, tlsEnable bool, swiftAndKeystone bool) error {
logger.Info("creating the object store via CRD")
- if err := o.k8sh.ResourceOperation("apply", o.manifests.GetObjectStore(storeName, int(replicaCount), rgwPort, tlsEnable)); err != nil {
+
+ if err := o.k8sh.ResourceOperation("apply", o.manifests.GetObjectStore(storeName, int(replicaCount), rgwPort, tlsEnable, swiftAndKeystone)); err != nil {
return err
}
diff --git a/tests/framework/installer/ceph_helm_installer.go b/tests/framework/installer/ceph_helm_installer.go
index 95a5f43592bc..a8042469cae8 100644
--- a/tests/framework/installer/ceph_helm_installer.go
+++ b/tests/framework/installer/ceph_helm_installer.go
@@ -282,7 +282,7 @@ func (h *CephInstaller) CreateFileSystemConfiguration(values map[string]interfac
// CreateObjectStoreConfiguration creates an object store configuration
func (h *CephInstaller) CreateObjectStoreConfiguration(values map[string]interface{}, name, scName string) error {
- testObjectStoreBytes := []byte(h.Manifests.GetObjectStore(name, 2, 8080, false))
+ testObjectStoreBytes := []byte(h.Manifests.GetObjectStore(name, 2, 8080, false, false))
var testObjectStoreCRD map[string]interface{}
if err := yaml.Unmarshal(testObjectStoreBytes, &testObjectStoreCRD); err != nil {
return err
diff --git a/tests/framework/installer/ceph_manifests.go b/tests/framework/installer/ceph_manifests.go
index 32288fa1f568..43a72d9bd830 100644
--- a/tests/framework/installer/ceph_manifests.go
+++ b/tests/framework/installer/ceph_manifests.go
@@ -47,7 +47,7 @@ type CephManifests interface {
GetNFS(name string, daemonCount int) string
GetNFSPool() string
GetRBDMirror(name string, daemonCount int) string
- GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string
+ GetObjectStore(name string, replicaCount, port int, tlsEnable bool, swiftAndKeystone bool) string
GetObjectStoreUser(name, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) string
GetBucketStorageClass(storeName, storageClassName, reclaimPolicy string) string
GetOBC(obcName, storageClassName, bucketName string, maxObject string, createBucket bool) string
@@ -464,35 +464,30 @@ spec:
requireSafeReplicaSize: false`
}
-func (m *CephManifestsMaster) GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string {
- if tlsEnable {
- return `apiVersion: ceph.rook.io/v1
-kind: CephObjectStore
-metadata:
- name: ` + name + `
- namespace: ` + m.settings.Namespace + `
-spec:
- metadataPool:
- replicated:
- size: 1
- requireSafeReplicaSize: false
- compressionMode: passive
- dataPool:
- replicated:
- size: 1
- requireSafeReplicaSize: false
- gateway:
- resources: null
- securePort: ` + strconv.Itoa(port) + `
- instances: ` + strconv.Itoa(replicaCount) + `
- sslCertificateRef: ` + name + `
-`
+func (m *CephManifestsMaster) GetObjectStore(name string, replicaCount, port int, tlsEnable bool, swiftAndKeystone bool) string {
+ type Spec struct {
+ Name string
+ TLS bool
+ Port int
+ ReplicaCount int
+ SwiftAndKeystone bool
+ Manifests *CephManifestsMaster
}
- return `apiVersion: ceph.rook.io/v1
+
+ spec := Spec{
+ Name: name,
+ TLS: tlsEnable,
+ ReplicaCount: replicaCount,
+ Port: port,
+ SwiftAndKeystone: swiftAndKeystone,
+ Manifests: m,
+ }
+
+ tmpl := `apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
- name: ` + name + `
- namespace: ` + m.settings.Namespace + `
+ name: {{ .Name }}
+ namespace: {{ .Manifests.Settings.Namespace }}
spec:
metadataPool:
replicated:
@@ -503,11 +498,36 @@ spec:
replicated:
size: 1
requireSafeReplicaSize: false
+ {{ if .SwiftAndKeystone }}
+ auth:
+ keystone:
+ acceptedRoles:
+ - admin
+ - member
+ - service
+ implicitTenants: "true"
+ revocationInterval: 1200
+ serviceUserSecretName: usersecret
+ tokenCacheSize: 1000
+ url: https://keystone.{{ .Manifests.Settings.Namespace }}.svc/
+ protocols:
+ swift:
+ accountInUrl: false
+ urlPrefix: foobar
+ s3:
+ enabled: true
+ authUseKeystone: true
+ {{ end }}
gateway:
resources: null
- port: ` + strconv.Itoa(port) + `
- instances: ` + strconv.Itoa(replicaCount) + `
-`
+ {{ if .TLS }}securePort: {{ .Port }}{{ else }}port: {{ .Port }}{{ end }}
+ instances: {{ .ReplicaCount }}
+ {{ if .SwiftAndKeystone }}
+ caBundleRef: keystone-bundle
+ {{ end }}
+ {{ if .TLS }}sslCertificateRef: {{ .Name }}{{ end }}`
+
+ return renderTemplate(tmpl, spec)
}
func (m *CephManifestsMaster) GetObjectStoreUser(name, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) string {
diff --git a/tests/framework/installer/ceph_manifests_previous.go b/tests/framework/installer/ceph_manifests_previous.go
index 5d9d99b2a5ee..3d48fb66c67d 100644
--- a/tests/framework/installer/ceph_manifests_previous.go
+++ b/tests/framework/installer/ceph_manifests_previous.go
@@ -131,8 +131,11 @@ func (m *CephManifestsPreviousVersion) GetNFSPool() string {
return m.latest.GetNFSPool()
}
-func (m *CephManifestsPreviousVersion) GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string {
- return m.latest.GetObjectStore(name, replicaCount, port, tlsEnable)
+func (m *CephManifestsPreviousVersion) GetObjectStore(name string, replicaCount, port int, tlsEnable bool, swiftAndKeystone bool) string {
+ if swiftAndKeystone {
+ panic("Previous version does not support swift or keystone")
+ }
+ return m.latest.GetObjectStore(name, replicaCount, port, tlsEnable, false)
}
func (m *CephManifestsPreviousVersion) GetObjectStoreUser(name, displayName, store, usercaps, maxsize string, maxbuckets, maxobjects int) string {
diff --git a/tests/framework/installer/installer.go b/tests/framework/installer/installer.go
index c44602b9516e..0206f504868b 100644
--- a/tests/framework/installer/installer.go
+++ b/tests/framework/installer/installer.go
@@ -18,7 +18,9 @@ package installer
import (
"fmt"
+ "strings"
"testing"
+ "text/template"
"github.com/coreos/pkg/capnslog"
"github.com/rook/rook/tests/framework/utils"
@@ -59,3 +61,16 @@ func checkError(t *testing.T, err error, message string) {
}
assert.NoError(t, err, "%s. %+v", message, err)
}
+
+func renderTemplate(templateSource string, data any) string {
+ templateInstance, err := template.New("template").Parse(templateSource)
+ if err != nil {
+ panic(fmt.Errorf("syntax error in template: %s", err))
+ }
+ var builder strings.Builder
+ err = templateInstance.Execute(&builder, data)
+ if err != nil {
+ panic(fmt.Errorf("error while rendering the template: %s", err))
+ }
+ return builder.String()
+}
diff --git a/tests/framework/installer/settings.go b/tests/framework/installer/settings.go
index 7cea682b0737..145db3b7741f 100644
--- a/tests/framework/installer/settings.go
+++ b/tests/framework/installer/settings.go
@@ -26,7 +26,7 @@ import (
"github.com/rook/rook/tests/framework/utils"
)
-var imageMatch = regexp.MustCompile(`image: rook\/ceph:[a-z0-9.-]+`)
+var imageMatch = regexp.MustCompile(`image: docker\.io/rook/ceph:[a-z0-9.-]+`)
func readManifest(filename string) string {
rootDir, err := utils.FindRookRoot()
@@ -39,7 +39,7 @@ func readManifest(filename string) string {
if err != nil {
panic(errors.Wrapf(err, "failed to read manifest at %s", manifest))
}
- return imageMatch.ReplaceAllString(string(contents), "image: rook/ceph:"+LocalBuildTag)
+ return imageMatch.ReplaceAllString(string(contents), "image: docker.io/rook/ceph:"+LocalBuildTag)
}
func buildURL(rookVersion, filename string) string {
diff --git a/tests/framework/utils/k8s_helper.go b/tests/framework/utils/k8s_helper.go
index c69b06dd4a0f..61344c35725a 100644
--- a/tests/framework/utils/k8s_helper.go
+++ b/tests/framework/utils/k8s_helper.go
@@ -154,9 +154,9 @@ func (k8sh *K8sHelper) SetDeploymentVersion(namespace, deploymentName, container
return err
}
-// Kubectl is wrapper for executing kubectl commands
-func (k8sh *K8sHelper) Kubectl(args ...string) (string, error) {
- result, err := k8sh.executor.ExecuteCommandWithTimeout(15*time.Second, "kubectl", args...)
+// KubectlWithTimeout is a wrapper for executing kubectl commands with a caller-specified timeout
+func (k8sh *K8sHelper) KubectlWithTimeout(timeout time.Duration, args ...string) (string, error) {
+ result, err := k8sh.executor.ExecuteCommandWithTimeout(timeout, "kubectl", args...)
if err != nil {
k8slogger.Errorf("Failed to execute: %s %+v : %+v. %s", cmd, args, err, result)
if args[0] == "delete" {
@@ -168,6 +168,13 @@ func (k8sh *K8sHelper) Kubectl(args ...string) (string, error) {
return result, nil
}
+// Kubectl is a wrapper for executing kubectl commands with a timeout of 15 seconds
+func (k8sh *K8sHelper) Kubectl(args ...string) (string, error) {
+
+ return k8sh.KubectlWithTimeout(15*time.Second, args...)
+
+}
+
// KubectlWithStdin is wrapper for executing kubectl commands in stdin
func (k8sh *K8sHelper) KubectlWithStdin(stdin string, args ...string) (string, error) {
diff --git a/tests/framework/utils/snapshot.go b/tests/framework/utils/snapshot.go
index 68dd2023d53d..d0bb73ad78d0 100644
--- a/tests/framework/utils/snapshot.go
+++ b/tests/framework/utils/snapshot.go
@@ -27,7 +27,7 @@ import (
const (
// snapshotterVersion from which the snapshotcontroller and CRD will be
// installed
- snapshotterVersion = "v7.0.2"
+ snapshotterVersion = "v8.0.1"
repoURL = "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter"
rbacPath = "deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml"
controllerPath = "deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
diff --git a/tests/integration/ceph_auth_keystone_test.go b/tests/integration/ceph_auth_keystone_test.go
new file mode 100644
index 000000000000..cf4625b725ad
--- /dev/null
+++ b/tests/integration/ceph_auth_keystone_test.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2023 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+ "context"
+ "testing"
+
+ "github.com/rook/rook/tests/framework/clients"
+ "github.com/rook/rook/tests/framework/installer"
+ "github.com/rook/rook/tests/framework/utils"
+ "github.com/stretchr/testify/suite"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ***************************************************
+// *** Major scenarios tested by the TestKeystoneAuthSuite ***
+// Setup
+// - A CephObject store will be created in a test cluster
+// Access
+// - Create a container
+// - Put a file in a container
+// - Get a file from the container
+// - Try to get the file without valid credentials
+// ***************************************************
+// *** Keystone/S3/Swift integration test implementation details ***
+// ceph_auth_keystone_test.go and ceph_base_keystone_test.go contain
+// integration tests for swift/s3 and keystone.
+// The tests are done against a minimal but functional setup of
+// OpenStack Keystone.
+// As it would be very complex to mock all of the involved keystone
+// functionality, the decision was made to use a working keystone setup
+// instead of implementing mocks.
+// The keystone container image used is provided by the Yaook-project
+// (see https://yaook.cloud).
+// The keystone configuration uses sqlite as the database backend.
+// cert-manager and trust-manager are used to provide the certificates
+// (cert-manager) and distribute the ca-certificate file(s) using a secret
+// (trust-manager).
+// To prevent hardcoded credentials, passwords are generated by the tests.
+// The integration tests use the aws s3 client (s3-functionality) and the
+// OpenStack CLI client (keystone- and swift-functionality).
+// (see https://docs.openstack.org/python-openstackclient/latest/).
+// This was a conscious design decision: the standard client tooling is used
+// as closely as possible to the way operators use it in practice, instead of
+// using other go-libraries (such as gophercloud) to (re-)implement the client tooling for the test.
+// ***************************************************
+
+func TestCephKeystoneAuthSuite(t *testing.T) {
+ s := new(KeystoneAuthSuite)
+ defer func(s *KeystoneAuthSuite) {
+ HandlePanics(recover(), s.TearDownSuite, s.T)
+ }(s)
+ suite.Run(t, s)
+}
+
+type KeystoneAuthSuite struct {
+ suite.Suite
+ helper *clients.TestClient
+ installer *installer.CephInstaller
+ settings *installer.TestCephSettings
+ k8shelper *utils.K8sHelper
+}
+
+func (h *KeystoneAuthSuite) SetupSuite() {
+ namespace := "keystoneauth-ns"
+ h.settings = &installer.TestCephSettings{
+ Namespace: namespace,
+ OperatorNamespace: namespace,
+ StorageClassName: "",
+ UseHelm: true,
+ UsePVC: false,
+ Mons: 1,
+ SkipOSDCreation: false,
+ // EnableAdmissionController: true,
+ EnableDiscovery: true,
+ ChangeHostName: true,
+ ConnectionsEncrypted: true,
+ RookVersion: installer.LocalBuildTag,
+ CephVersion: installer.QuincyVersion,
+ SkipClusterCleanup: false,
+ SkipCleanupPolicy: false,
+ }
+ h.settings.ApplyEnvVars()
+ h.installer, h.k8shelper = StartTestCluster(h.T, h.settings)
+
+ // install yaook-keystone here
+ err := InstallKeystoneInTestCluster(h.k8shelper, namespace)
+ h.Suite.NoErrorf(err, "Failed to install Keystone in cluster")
+
+ // create usersecret for object store to use
+ testCtx := context.TODO()
+
+ secrets := map[string][]byte{
+ "OS_AUTH_TYPE": []byte("password"),
+ "OS_IDENTITY_API_VERSION": []byte("3"),
+ "OS_PROJECT_DOMAIN_NAME": []byte("Default"),
+ "OS_USER_DOMAIN_NAME": []byte("Default"),
+ "OS_PROJECT_NAME": []byte(testuserdata["rook-user"]["project"]),
+ "OS_USERNAME": []byte(testuserdata["rook-user"]["username"]),
+ "OS_PASSWORD": []byte(testuserdata["rook-user"]["password"]),
+ }
+
+ secret := &v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "usersecret",
+ Namespace: namespace,
+ },
+ Data: secrets,
+ }
+
+ _, err = h.k8shelper.Clientset.CoreV1().Secrets(namespace).Create(testCtx, secret, metav1.CreateOptions{})
+ h.Suite.NoErrorf(err, "Failed to create keystone usersecret")
+
+ h.helper = clients.CreateTestClient(h.k8shelper, h.installer.Manifests)
+}
+
+func (h *KeystoneAuthSuite) TearDownSuite() {
+ CleanUpKeystoneInTestCluster(h.k8shelper, h.settings.Namespace)
+
+ // remove user secret
+ if _, err := h.k8shelper.KubectlWithTimeout(30, "delete", "-n", h.settings.Namespace, "secret", "usersecret"); err != nil {
+ logger.Warningf("Could not remove user secret: %s", err)
+ }
+
+ h.installer.UninstallRook()
+}
+
+func (h *KeystoneAuthSuite) AfterTest(suiteName, testName string) {
+ h.installer.CollectOperatorLog(suiteName, testName)
+}
+
+// Test object store creation on Rook that was installed via Helm
+func (h *KeystoneAuthSuite) TestObjectStoreOnRookInstalledViaHelmUsingKeystone() {
+ deleteStore := true
+ tls := false
+ swiftAndKeystone := true
+
+ runObjectE2ETestLite(h.T(), h.helper, h.k8shelper, h.installer, h.settings.Namespace, "default", 3, deleteStore, tls, swiftAndKeystone)
+}
+
+func (h *KeystoneAuthSuite) TestWithSwiftAndKeystone() {
+ deleteStore := true
+ tls := false
+ swiftAndKeystone := true
+
+ objectStoreServicePrefix = objectStoreServicePrefixUniq
+ runSwiftE2ETest(h.T(), h.helper, h.k8shelper, h.installer, h.settings.Namespace, "default", 3, deleteStore, tls, swiftAndKeystone)
+ cleanUpTLSks(h)
+
+}
+
+func (h *KeystoneAuthSuite) TestWithS3AndKeystone() {
+ deleteStore := true
+ tls := false
+ swiftAndKeystone := true
+
+ objectStoreServicePrefix = objectStoreServicePrefixUniq
+ runS3E2ETest(h.T(), h.helper, h.k8shelper, h.installer, h.settings.Namespace, "default", 3, deleteStore, tls, swiftAndKeystone)
+ cleanUpTLSks(h)
+}
+
+func cleanUpTLSks(h *KeystoneAuthSuite) {
+ err := h.k8shelper.Clientset.CoreV1().Secrets(h.settings.Namespace).Delete(context.TODO(), objectTLSSecretName, metav1.DeleteOptions{})
+ if err != nil {
+ if !errors.IsNotFound(err) {
+ logger.Fatal("failed to delete store TLS secret")
+ }
+ }
+ logger.Info("successfully deleted store TLS secret")
+}
diff --git a/tests/integration/ceph_base_keystone_test.go b/tests/integration/ceph_base_keystone_test.go
new file mode 100644
index 000000000000..7fc019fbc786
--- /dev/null
+++ b/tests/integration/ceph_base_keystone_test.go
@@ -0,0 +1,1242 @@
+/*
+Copyright 2023 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "github.com/rook/rook/tests/framework/clients"
+ "github.com/rook/rook/tests/framework/installer"
+ "github.com/rook/rook/tests/framework/utils"
+ "github.com/sethvargo/go-password/password"
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const testProjectName = "testproject"
+
+var testuserdata = map[string]map[string]string{
+ "admin": {
+ "description": "keystone admin account",
+ "username": "admin",
+ "project": "admin",
+ "role": "admin",
+ },
+ "rook-user": {
+ "description": "swift admin account",
+ "project": "admin",
+ "username": "rook-user",
+ "role": "admin",
+ },
+ "alice": {
+ "description": "normal user account",
+ "username": "alice",
+ "project": testProjectName,
+ "role": "member",
+ },
+ "carol": {
+ "description": "normal user account",
+ "username": "carol",
+ "project": testProjectName,
+ "role": "admin",
+ },
+ "mallory": {
+ "description": "bad actor user",
+ "username": "mallory",
+ "project": testProjectName,
+ "role": "",
+ },
+}
+
+func InstallKeystoneInTestCluster(shelper *utils.K8sHelper, namespace string) error {
+
+ if err := initializePasswords(); err != nil {
+
+ return err
+ }
+
+ ctx := context.TODO()
+
+ // The namespace keystoneauth-ns is created by SetupSuite
+
+ if err := shelper.CreateNamespace("cert-manager"); err != nil {
+
+ logger.Error("Could not create namespace cert-manager")
+ return err
+
+ }
+
+ // install cert-manager using helm
+ // the helm installer uses the rook repository and cannot be used as is
+ // therefore parts of the installer are adapted here
+
+ // use helm path from environment (the same is used by the helm installer)
+ helmPath := os.Getenv("TEST_HELM_PATH")
+ if helmPath == "" {
+ helmPath = "/tmp/rook-tests-scripts-helm/helm"
+ }
+ helmHelper := utils.NewHelmHelper(helmPath)
+
+ // add the cert-manager helm repo
+ logger.Infof("adding cert-manager helm repo")
+ cmdArgs := []string{"repo", "add", "jetstack", "https://charts.jetstack.io"}
+ if _, err := helmHelper.Execute(cmdArgs...); err != nil {
+ // Abort if the jetstack repo cannot be added (note: also fails if the repo already exists)
+ logger.Errorf("failed to add repo cert-manager, err=%v", err)
+ return err
+ }
+ cmdArgs = []string{"repo", "update"}
+ if _, err := helmHelper.Execute(cmdArgs...); err != nil {
+ // Abort if the helm repositories cannot be updated
+ logger.Warningf("failed to update helm repositories, err=%v", err)
+ return err
+ }
+
+ if err := installHelmChart(helmHelper, "cert-manager", "cert-manager", "jetstack/cert-manager", "1.13.3",
+ "installCRDs=true"); err != nil {
+ return err
+ }
+
+ // trust-manager does not support k8s<1.25
+ // This allows for secrets to be read/written by trust-manager in all namespaces
+ // This is considered insecure in production environments! This is here only for the quick test setup.
+ if err := installHelmChart(helmHelper, "cert-manager", "trust-manager", "jetstack/trust-manager", "0.7.0",
+ "app.trust.namespace="+namespace, "installCRDs=true", "secretTargets.enabled=true", "secretTargets.authorizedSecretsAll=true"); err != nil {
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", keystoneApiClusterIssuer(namespace)); err != nil {
+ logger.Errorf("Could not apply ClusterIssuer in namespace %s: %s", namespace, err)
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", keystoneApiCaCertificate(namespace)); err != nil {
+ logger.Errorf("Could not apply ClusterIssuer CA Certificate in namespace %s: %s", namespace, err)
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", keystoneApiCaIssuer(namespace)); err != nil {
+ logger.Errorf("Could not install CA Issuer in namespace %s: %s", namespace, err)
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", keystoneApiCertificate(namespace)); err != nil {
+ logger.Errorf("Could not create Certificate (request) in namespace %s", namespace)
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", trustManagerBundle(namespace)); err != nil {
+ logger.Errorf("Could not create CA Certificate Bundle in namespace %s", namespace)
+ return err
+ }
+
+ data := getKeystoneApache2CM(namespace)
+
+ keystoneApacheCM := &v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "keystone-apache2-conf",
+ Namespace: namespace,
+ },
+ Data: data,
+ }
+
+ if _, err := shelper.Clientset.CoreV1().ConfigMaps(namespace).Create(ctx, keystoneApacheCM, metav1.CreateOptions{}); err != nil {
+
+ logger.Fatalf("failed to create apache2.conf configmap in namespace %s with error %s", namespace, err)
+ return err
+
+ }
+
+ secretData := getKeystoneConfig()
+
+ keystoneConfSecret := &v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "keystone-config",
+ Namespace: namespace,
+ },
+ Data: secretData,
+ }
+
+ if _, err := shelper.Clientset.CoreV1().Secrets(namespace).Create(ctx, keystoneConfSecret, metav1.CreateOptions{}); err != nil {
+ logger.Errorf("Could not create keystone config secret in namespace %s", namespace)
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", keystoneDeployment(namespace, testuserdata["admin"]["password"])); err != nil {
+ logger.Errorf("Could not create keystone deployment in namespace %s", namespace)
+ return err
+ }
+
+ if err := shelper.WaitForPodCount("app=keystone", namespace, 1); err != nil {
+ logger.Errorf("Wait for keystone pod failed in namespace %s", namespace)
+ return err
+ }
+
+ if _, err := shelper.KubectlWithTimeout(315, "wait", "--timeout=300s", "--namespace", namespace, "pod", "--selector=app=keystone", "--for=condition=Ready"); err != nil {
+ logger.Errorf("Failed to wait for pod keystone in namespace %s", namespace)
+ return err
+ }
+
+ if err := shelper.ResourceOperation("apply", keystoneService(namespace)); err != nil {
+ logger.Errorf("Could not create service for keystone in namespace %s", namespace)
+ return err
+ }
+
+ for _, userdata := range testuserdata {
+
+ if err := shelper.ResourceOperation("apply", createOpenStackClient(namespace, userdata["project"], userdata["username"], userdata["password"])); err != nil {
+ logger.Errorf("Could not create openstack client deployment in namespace %s", namespace)
+ return err
+ }
+
+ }
+
+ return nil
+
+}
+
+func initializePasswords() error {
+
+ for user := range testuserdata {
+
+ var err error
+
+ if testuserdata[user]["password"], err = password.Generate(20, 2, 0, false, false); err != nil {
+
+ logger.Errorf("Failed to initialize password for user %s: %s", user, err)
+ return err
+
+ }
+
+ }
+
+ return nil
+
+}
+
+func createOpenStackClient(namespace string, project string, username string, password string) string {
+ return `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: osc-` + project + `-` + username + `
+ namespace: ` + namespace + `
+spec:
+ progressDeadlineSeconds: 600
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app: osc-` + project + `-` + username + `
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: osc-` + project + `-` + username + `
+ spec:
+ containers:
+ - command:
+ - sleep
+ - "7200"
+ image: nixery.dev/shell/awscli2/openstackclient/jq/busybox
+ env:
+ - name: REQUESTS_CA_BUNDLE
+ value: /etc/ssl/keystone/ca.crt
+ - name: OS_AUTH_TYPE
+ value: password
+ - name: OS_AUTH_URL
+ value: https://keystone.` + namespace + `.svc/v3
+ - name: OS_IDENTITY_API_VERSION
+ value: "3"
+ - name: OS_PROJECT_DOMAIN_NAME
+ value: Default
+ - name: OS_INTERFACE
+ value: internal
+ - name: OS_USER_DOMAIN_NAME
+ value: Default
+ - name: OS_PROJECT_NAME
+ value: ` + project + `
+ - name: OS_USERNAME
+ value: ` + username + `
+ - name: OS_PASSWORD
+ value: "` + password + `"
+ imagePullPolicy: IfNotPresent
+ name: openstackclient
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /etc/ssl/keystone
+ name: keystone-certificate
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: keystone-certificate
+ secret:
+ defaultMode: 420
+ secretName: keystone-api-tls
+`
+}
+
+func trustManagerBundle(namespace string) string {
+
+ return `apiVersion: trust.cert-manager.io/v1alpha1
+kind: Bundle
+metadata:
+ name: keystone-bundle
+ namespace: ` + namespace + `
+spec:
+ sources:
+ - useDefaultCAs: true
+ - secret:
+ name: "root-secret"
+ key: "tls.crt"
+ target:
+ secret:
+ key: "cabundle"`
+
+}
+
+func installHelmChart(helmHelper *utils.HelmHelper, namespace string, chartName string, chart string, version string, settings ...string) error {
+
+ logger.Infof("installing helm chart %s with version %s", chart, version)
+
+ arguments := []string{"upgrade", "--install", "--debug", "--namespace", namespace, chartName, chart, "--version=" + version, "--wait"}
+
+ for _, setting := range settings {
+
+ arguments = append(arguments, "--set", setting)
+
+ }
+
+ _, err := helmHelper.Execute(arguments...)
+ if err != nil {
+ logger.Errorf("failed to install helm chart %s with version %s in namespace: %v, err=%v", chart, version, namespace, err)
+ return err
+ }
+
+ return nil
+
+}
+
+func keystoneApiCaIssuer(namespace string) string {
+
+ return `apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+ name: my-ca-issuer
+ namespace: ` + namespace + `
+spec:
+ ca:
+ secretName: root-secret
+`
+
+}
+
+func keystoneApiCaCertificate(namespace string) string {
+
+ return `apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: my-selfsigned-ca
+ namespace: ` + namespace + `
+spec:
+ isCA: true
+ commonName: my-selfsigned-ca
+ secretName: root-secret
+ privateKey:
+ algorithm: ECDSA
+ size: 256
+ issuerRef:
+ name: selfsigned-issuer
+ kind: ClusterIssuer
+ group: cert-manager.io`
+
+}
+
+func keystoneApiClusterIssuer(namespace string) string {
+
+ return `apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: selfsigned-issuer
+ namespace: ` + namespace + `
+spec:
+ selfSigned: {}
+`
+
+}
+
+func keystoneService(namespace string) string {
+
+ return `apiVersion: v1
+kind: Service
+metadata:
+ name: keystone
+ namespace: ` + namespace + `
+spec:
+ ports:
+ - name: internal
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ - name: external
+ port: 5001
+ protocol: TCP
+ targetPort: 5001
+ selector:
+ app: keystone
+ sessionAffinity: None
+ type: ClusterIP
+status:
+ loadBalancer: {}`
+
+}
+
+func keystoneApiCertificate(namespace string) string {
+
+ return `
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: keystone-api
+ namespace: ` + namespace + `
+spec:
+ secretName: keystone-api-tls
+ duration: 10h
+ renewBefore: 9h
+ subject:
+ organizations:
+ - rook-integrationtest-keystone-api
+ isCA: false
+ privateKey:
+ algorithm: RSA
+ encoding: PKCS1
+ size: 2048
+ usages:
+ - server auth
+ dnsNames:
+ - keystone.` + namespace + `.svc
+ issuerRef:
+ name: my-ca-issuer
+ kind: Issuer
+`
+
+}
+
+func keystoneDeployment(namespace string, adminpassword string) string {
+
+ return `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: keystone-api
+ namespace: ` + namespace + `
+ labels:
+ app: keystone
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: keystone
+ template:
+ metadata:
+ labels:
+ app: keystone
+ spec:
+ initContainers:
+ - name: init-fernet
+ image: registry.yaook.cloud/yaook/keystone-yoga:3.0.30
+ command: ['sh', '-c', 'keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone']
+ volumeMounts:
+ - mountPath: /etc/keystone/keystone.conf
+ name: keystone-config-vol
+ subPath: keystone.conf
+ - mountPath: /var/keystone
+ name: dbdir
+ - mountPath: /etc/keystone/fernet-keys
+ name: keystonefernet
+ securityContext:
+ runAsUser: 2500001
+ - name: init-db
+ image: registry.yaook.cloud/yaook/keystone-yoga:3.0.30
+ command: ['sh', '-c', 'keystone-manage db_sync']
+ volumeMounts:
+ - mountPath: /etc/keystone/keystone.conf
+ name: keystone-config-vol
+ subPath: keystone.conf
+ - mountPath: /var/keystone
+ name: dbdir
+ securityContext:
+ runAsUser: 2500001
+ - name: init-keystone-endpoint
+ image: registry.yaook.cloud/yaook/keystone-yoga:3.0.30
+ command: [ 'sh', '-c', 'keystone-manage bootstrap --bootstrap-password ` + adminpassword + ` --bootstrap-username admin --bootstrap-project-name admin --bootstrap-role-name admin --bootstrap-service-name keystone --bootstrap-region-id RegionOne --bootstrap-admin-url https://keystone.` + namespace + `.svc --bootstrap-internal-url https://keystone.` + namespace + `.svc']
+ volumeMounts:
+ - mountPath: /etc/keystone/keystone.conf
+ name: keystone-config-vol
+ subPath: keystone.conf
+ - mountPath: /etc/keystone/fernet-keys
+ name: keystonefernet
+ - mountPath: /var/keystone
+ name: dbdir
+ containers:
+ - env:
+ - name: REQUESTS_CA_BUNDLE
+ value: /etc/pki/tls/certs/ca-bundle.crt
+ - name: WSGI_PROCESSES
+ value: "3"
+ image: registry.yaook.cloud/yaook/keystone-yoga:3.0.30
+ imagePullPolicy: Always
+ name: keystone
+ readinessProbe:
+ exec:
+ command:
+ - curl
+ - -k
+ - https://localhost
+ failureThreshold: 3
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ startupProbe:
+ exec:
+ command:
+ - curl
+ - -k
+ - https://localhost
+ failureThreshold: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /var/keystone
+ name: dbdir
+ - mountPath: /etc/keystone/keystone.conf
+ name: keystone-config-vol
+ subPath: keystone.conf
+ - mountPath: /etc/apache2/apache2.conf
+ name: keystone-apache2-conf
+ subPath: apache2.conf
+ - mountPath: /etc/ssl/keystone
+ name: keystone-certificate
+ - mountPath: /etc/keystone/fernet-keys
+ name: keystonefernet
+ dnsPolicy: ClusterFirst
+ enableServiceLinks: false
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ shareProcessNamespace: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: dbdir
+ emptyDir: {}
+ - name: keystone-config-vol
+ projected:
+ defaultMode: 420
+ sources:
+ - secret:
+ items:
+ - key: keystone.conf
+ path: keystone.conf
+ name: keystone-config
+ - configMap:
+ defaultMode: 420
+ name: keystone-apache2-conf
+ name: keystone-apache2-conf
+ - name: keystonefernet
+ emptyDir: {}
+ - name: ssl-terminator-config
+ emptyDir: {}
+ - name: tls-secret
+ emptyDir: {}
+ - name: keystone-certificate
+ secret:
+ defaultMode: 420
+ secretName: keystone-api-tls`
+
+}
+
+func getKeystoneConfig() map[string][]byte {
+
+ returnMap := make(map[string][]byte)
+
+ keystoneConfig := `[DEFAULT]
+use_stderr = true
+use_json = true
+debug = true
+insecure_debug = true
+
+[identity]
+driver = sql
+
+[database]
+connection = sqlite:////var/keystone/keystone.db
+
+[cache]
+enabled = false`
+
+ returnMap["keystone.conf"] = []byte(keystoneConfig)
+
+ return returnMap
+
+}
+
+func getKeystoneApache2CM(namespace string) map[string]string {
+
+ returnMap := make(map[string]string)
+
+ apache2Config := `LoadModule mpm_event_module modules/mod_mpm_event.so
+LoadModule wsgi_module modules/mod_wsgi.so
+LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
+LoadModule authz_core_module modules/mod_authz_core.so
+LoadModule ssl_module modules/mod_ssl.so
+
+ServerRoot "/etc/apache2"
+Mutex file:/var/lock/apache2 default default
+PidFile /run/apache2/apache2.pid
+Timeout 60
+KeepAlive On
+MaxKeepAliveRequests 100
+KeepAliveTimeout 15
+HostnameLookups Off
+LogLevel warn
+
+User www-data
+Group www-data
+
+Listen 443
+
+ErrorLog "/proc/self/fd/2"
+
+
+ ServerName keystone-api.` + namespace + `.svc
+ SSLEngine on
+ SSLCertificateFile /etc/ssl/keystone/tls.crt
+ SSLCertificateKeyFile /etc/ssl/keystone/tls.key
+ SSLCertificateChainFile /etc/ssl/keystone/ca.crt
+ WSGIDaemonProcess keystone-public processes=${WSGI_PROCESSES} threads=1 user=keystone group=keystone display-name=%{GROUP} home=/usr/local
+ WSGIProcessGroup keystone-public
+ WSGIScriptAlias / /usr/local/bin/keystone-wsgi-public
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+
+
+ Require all granted
+
+
+ ErrorLogFormat "%M"
+ LogFormat "{ \"asctime\":\"%{%Y-%m-%d %H:%M:%S}t\", \"remoteIP\":\"%a\", \"name\":\"%V\", \"host\":\"%h\", \"request\":\"%U\", \"query\":\"%q\", \"message\":\"%r\", \"method\":\"%m\", \"status\":\"%>s\", \"userAgent\":\"%{User-agent}i\", \"referer\":\"%{Referer}i\" }" logformat
+ CustomLog "/dev/stdout" logformat
+
+TraceEnable Off`
+
+ returnMap["apache2.conf"] = apache2Config
+
+ return returnMap
+
+}
+
+func CleanUpKeystoneInTestCluster(shelper *utils.K8sHelper, namespace string) {
+
+ // Un-Install keystone with yaook
+ err := shelper.DeleteResource("-n", namespace, "configmap", "keystone-apache2-conf")
+ if err != nil {
+ logger.Warningf("Could not delete configmap keystone-apache2-conf in namespace %s", namespace)
+ }
+
+ err = shelper.DeleteResource("-n", namespace, "secret", "keystone-config")
+ if err != nil {
+ logger.Warningf("Could not delete secret keystone-config in namespace %s", namespace)
+ }
+
+ err = shelper.DeleteResource("-n", namespace, "deployment", "keystone-api")
+ if err != nil {
+ logger.Warningf("Could not delete deployment keystone-api in namespace %s", namespace)
+ }
+
+ // cert-manager related resources (including certificates and secrets) are not removed here
+ // (as they will be removed anyway on uninstalling cert-manager)
+
+}
+
+// runSwiftE2ETest exercises Swift object operations (container/object CRUD, access control) against a Keystone-backed object store
+func runSwiftE2ETest(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, deleteStore bool, enableTLS bool, swiftAndKeystone bool) {
+ andDeleting := ""
+ if deleteStore {
+ andDeleting = "and deleting"
+ }
+ logger.Infof("test creating %s object store %q in namespace %q", andDeleting, storeName, namespace)
+
+ testContainerName := "test-container"
+
+ prepareE2ETest(t, helper, k8sh, installer, namespace, storeName, replicaSize, deleteStore, enableTLS, swiftAndKeystone, testContainerName)
+
+ // test with user with read+write access (member-role)
+
+ t.Run("create container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "container", "create", testContainerName,
+ )
+
+ })
+
+ t.Run("show container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "container", "show", testContainerName,
+ )
+
+ })
+
+ t.Run("create local testfile", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "echo test-content > /tmp/testfile",
+ )
+
+ })
+
+ // openstack object create testContainerName /testfile
+ t.Run("create object in container (using the local testfile) (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "create", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ t.Run("list objects in container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "list", testContainerName,
+ )
+
+ })
+
+ t.Run("show testfile object in container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "show", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ t.Run("save testfile object from container to local disk (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "save", "--file", "/tmp/testfile.saved", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ t.Run("check testfile (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "diff /tmp/testfile /tmp/testfile.saved",
+ )
+
+ })
+
+ t.Run("delete object in container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "delete", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ t.Run("delete container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "container", "delete", testContainerName,
+ )
+
+ })
+
+ // unauthorized access tests (mallory has no role in the project; access is expected to be denied)
+ // create container (with alice)
+ t.Run("prepare container for unauthorized access test (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "container", "create", testContainerName,
+ )
+
+ // create object (with alice)
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "echo test-content > /tmp/testfile",
+ )
+
+ // openstack object create testContainerName /testfile
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "create", testContainerName, "/tmp/testfile",
+ )
+
+ // check whether container got created
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "list", testContainerName,
+ )
+
+ })
+
+ // try access container with id (with mallory, expect: denied)
+ t.Run("display a container (as unprivileged user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "mallory", false,
+ "openstack", "container", "show", testContainerName,
+ )
+
+ })
+
+ // try read access object with id (with mallory, expect: denied)
+ t.Run("show testfile object in container (as unprivileged user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "mallory", false,
+ "openstack", "object", "show", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ // try write access object with id (with mallory, expect: denied)
+ t.Run("create local testfile", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "mallory", true,
+ "bash", "-c", "echo bad-content > /tmp/testfile",
+ )
+
+ })
+
+ // openstack object create testContainerName /testfile
+ t.Run("create object in container (using the local testfile) (as unprivileged user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "mallory", false,
+ "openstack", "object", "create", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ // try deleting object (with mallory, expect: denied)
+ t.Run("delete object in container (as unprivileged user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "mallory", false,
+ "openstack", "object", "delete", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ // try deleting container (with mallory, expect: denied)
+ t.Run("delete container (as unprivileged user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "mallory", false,
+ "openstack", "container", "delete", testContainerName,
+ )
+
+ })
+
+ // try to access the container (with carol, an admin in the project, expect: success)
+ t.Run("show container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "container", "show", testContainerName,
+ )
+
+ })
+
+ t.Run("create local testfile (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "bash", "-c", "echo test-content > /tmp/testfile",
+ )
+
+ })
+
+ t.Run("create local testfile (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "bash", "-c", "echo test-content > /tmp/testfile-rook-user",
+ )
+
+ })
+
+ // openstack object create testContainerName /testfile
+ // try write access object with id (with rook-user, expect: success)
+ t.Run("create object in container (using the local testfile) (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "object", "create", testContainerName, "/tmp/testfile-rook-user",
+ )
+
+ })
+
+ t.Run("list objects in container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "object", "list", testContainerName,
+ )
+
+ })
+
+ // try read access object with id (with rook-user, expect: success)
+ t.Run("show testfile object in container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "object", "show", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ t.Run("save testfile object from container to local disk (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "object", "save", "--file", "/tmp/testfile.saved", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+ t.Run("check testfile (admin-user)", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "bash", "-c", "diff /tmp/testfile /tmp/testfile.saved",
+ )
+
+ })
+
+ // try deleting object (with rook-user, expect: success)
+ t.Run("delete object in container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "object", "delete", testContainerName, "/tmp/testfile",
+ )
+
+ })
+
+	t.Run("delete rook-user object in container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "object", "delete", testContainerName, "/tmp/testfile-rook-user",
+ )
+
+ })
+
+ // try deleting container (with rook-user, expect: success)
+ t.Run("delete container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "carol", true,
+ "openstack", "container", "delete", testContainerName,
+ )
+ })
+
+ cleanupE2ETest(t, k8sh, namespace, storeName, deleteStore, testContainerName)
+}
+
+func testInOpenStackClient(t *testing.T, sh *utils.K8sHelper, namespace string, projectname string, username string, expectNoError bool, command ...string) {
+
+ commandLine := []string{"exec", "-n", namespace, "deployment/osc-" + projectname + "-" + username, "--"}
+
+ commandLine = append(commandLine, command...)
+ output, err := sh.KubectlWithTimeout(60, commandLine...)
+
+ if err != nil {
+ logger.Warningf("failed to execute command in openstack cli: %s: %s", commandLine, output)
+ }
+
+ logger.Infof("%s", output)
+
+ if expectNoError {
+
+ assert.NoError(t, err)
+
+ } else {
+
+ assert.Error(t, err)
+
+ }
+
+}
+
+func prepareE2ETest(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, deleteStore bool, enableTLS bool, swiftAndKeystone bool, testContainerName string) {
+
+ t.Run("create test project in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "project", "create", testProjectName,
+ )
+
+ })
+
+ for _, value := range testuserdata {
+
+ if value["username"] == "admin" {
+ continue
+ }
+
+ t.Run("create test user "+value["username"]+" in keystone", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "user", "create", "--project", value["project"], "--password", value["password"], value["username"],
+ )
+ })
+
+ if value["role"] != "" {
+
+ t.Run("assign test user "+value["username"]+" to project "+value["project"]+" in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "role", "add", "--user", value["username"], "--project", value["project"], value["role"],
+ )
+
+ })
+
+ }
+
+ }
+
+ createCephObjectStore(t, helper, k8sh, installer, namespace, storeName, replicaSize, enableTLS, swiftAndKeystone)
+
+ t.Run("create service swift in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "service", "create", "--name", "swift", "object-store",
+ )
+
+ })
+
+ t.Run("create internal swift endpoint in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "endpoint", "create", "--region", "default", "--enable", "swift", "internal", ""+rgwServiceUri(storeName, namespace)+"/foobar/v1",
+ )
+
+ })
+
+ t.Run("create admin swift endpoint in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "endpoint", "create", "--region", "default", "--enable", "swift", "admin", ""+rgwServiceUri(storeName, namespace)+"/foobar/v1",
+ )
+
+ })
+}
+
+func cleanupE2ETest(t *testing.T, k8sh *utils.K8sHelper, namespace, storeName string, deleteStore bool, testContainerName string) {
+
+ t.Run("Delete swift endpoints in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "bash", "-c", "openstack endpoint list -f json | jq '.[] | select(.\"Service Name\" == \"swift\") | .ID' -r | xargs openstack endpoint delete",
+ )
+
+ })
+
+ t.Run("Delete service swift in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "service", "delete", "swift",
+ )
+
+ })
+
+ if deleteStore {
+
+ t.Run("delete object store", func(t *testing.T) {
+
+ deleteObjectStore(t, k8sh, namespace, storeName)
+ assertObjectStoreDeletion(t, k8sh, namespace, storeName)
+
+ })
+
+ }
+
+ for _, value := range testuserdata {
+
+ if value["username"] == "admin" {
+ continue
+ }
+
+ t.Run("delete test user "+value["username"]+" in keystone", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "user", "delete", value["username"],
+ )
+ })
+
+ }
+
+ t.Run("delete test project in keystone", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ "admin", "admin", true,
+ "openstack", "project", "delete", testProjectName,
+ )
+
+ })
+
+}
+
+func rgwServiceUri(storeName string, namespace string) string {
+ return "http://" + RgwServiceName(storeName) + "." + namespace + ".svc"
+}
+
+func runS3E2ETest(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, deleteStore bool, enableTLS bool, swiftAndKeystone bool) {
+ andDeleting := ""
+ if deleteStore {
+ andDeleting = "and deleting"
+ }
+ logger.Infof("test creating %s object store %q in namespace %q", andDeleting, storeName, namespace)
+
+ testContainerName := "test-container"
+
+ prepareE2ETest(t, helper, k8sh, installer, namespace, storeName, replicaSize, deleteStore, enableTLS, swiftAndKeystone, testContainerName)
+
+ t.Run("create container (with user being a member)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "container", "create", testContainerName,
+ )
+
+ })
+
+ t.Run("create AWS config file", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "mkdir -p .aws && openstack ec2 credentials create -fjson | jq -r '\"[default]\\naws_access_key_id = \" + .access + \"\\naws_secret_access_key = \" + .secret + \"\\n\"' | tee .aws/credentials && printf '[default]\nregion = idontcare' > .aws/config",
+ )
+ })
+
+ t.Run("List bucket with S3 with aws debug", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --debug --endpoint-url=http://"+RgwServiceName(storeName)+"."+namespace+".svc s3api list-buckets",
+ )
+
+ })
+
+ t.Run("List bucket with S3", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --endpoint-url="+rgwServiceUri(storeName, namespace)+" s3api list-buckets | jq '.Buckets | .[].Name' -r | grep "+testContainerName,
+ )
+
+ })
+
+ t.Run("List file with S3 created by OS", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "touch testfile2")
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "object", "create", ""+testContainerName+"", "testfile2")
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --endpoint-url="+rgwServiceUri(storeName, namespace)+" s3 ls s3://"+testContainerName+"| grep testfile2",
+ )
+
+ })
+
+ t.Run("Upload test file using S3", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "echo test-content > /tmp/testfile",
+ )
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --endpoint-url="+rgwServiceUri(storeName, namespace)+" s3 cp /tmp/testfile s3://"+testContainerName+"/testfile",
+ )
+
+ })
+
+ t.Run("save testfile object from container to local disk", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --endpoint-url="+rgwServiceUri(storeName, namespace)+" s3 cp s3://"+testContainerName+"/testfile /tmp/testfile.saved")
+ })
+
+ t.Run("check testfile", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "diff /tmp/testfile /tmp/testfile.saved")
+ })
+
+ t.Run("delete object in container", func(t *testing.T) {
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --endpoint-url="+rgwServiceUri(storeName, namespace)+" s3 rm s3://"+testContainerName+"/testfile")
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "bash", "-c", "aws --endpoint-url="+rgwServiceUri(storeName, namespace)+" s3 rm s3://"+testContainerName+"/testfile2")
+ })
+
+ t.Run("delete container (admin-user)", func(t *testing.T) {
+
+ testInOpenStackClient(t, k8sh, namespace,
+ testProjectName, "alice", true,
+ "openstack", "container", "delete", testContainerName,
+ )
+ })
+
+ cleanupE2ETest(t, k8sh, namespace, storeName, deleteStore, testContainerName)
+}
diff --git a/tests/integration/ceph_base_object_test.go b/tests/integration/ceph_base_object_test.go
index 5f8779a2b2ef..6358935aa212 100644
--- a/tests/integration/ceph_base_object_test.go
+++ b/tests/integration/ceph_base_object_test.go
@@ -42,8 +42,9 @@ import (
)
const (
+ rgwPrefix = "rook-ceph-rgw"
//nolint:gosec // since this is not leaking any hardcoded credentials, it's just the secret name
- objectTLSSecretName = "rook-ceph-rgw-tls-test-store-csr"
+ objectTLSSecretName = rgwPrefix + "-tls-test-store-csr"
)
var (
@@ -66,47 +67,51 @@ var (
)
// Test Object StoreCreation on Rook that was installed via helm
-func runObjectE2ETestLite(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, deleteStore bool, enableTLS bool) {
+func runObjectE2ETestLite(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, deleteStore bool, enableTLS bool, swiftAndKeystone bool) {
andDeleting := ""
if deleteStore {
andDeleting = "and deleting"
}
logger.Infof("test creating %s object store %q in namespace %q", andDeleting, storeName, namespace)
- createCephObjectStore(t, helper, k8sh, installer, namespace, storeName, replicaSize, enableTLS)
+ createCephObjectStore(t, helper, k8sh, installer, namespace, storeName, replicaSize, enableTLS, swiftAndKeystone)
if deleteStore {
t.Run("delete object store", func(t *testing.T) {
deleteObjectStore(t, k8sh, namespace, storeName)
assertObjectStoreDeletion(t, k8sh, namespace, storeName)
})
+ // remove user secret
}
}
+func RgwServiceName(storeName string) string {
+ return rgwPrefix + "-" + storeName
+}
+
// create a CephObjectStore and wait for it to report ready status
-func createCephObjectStore(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, tlsEnable bool) {
+func createCephObjectStore(t *testing.T, helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, namespace, storeName string, replicaSize int, tlsEnable bool, swiftAndKeystone bool) {
logger.Infof("Create Object Store %q with replica count %d", storeName, replicaSize)
- rgwServiceName := "rook-ceph-rgw-" + storeName
if tlsEnable {
t.Run("generate TLS certs", func(t *testing.T) {
- generateRgwTlsCertSecret(t, helper, k8sh, namespace, storeName, rgwServiceName)
+ generateRgwTlsCertSecret(t, helper, k8sh, namespace, storeName, RgwServiceName(storeName))
})
}
t.Run("create CephObjectStore", func(t *testing.T) {
- err := helper.ObjectClient.Create(namespace, storeName, int32(replicaSize), tlsEnable)
+ err := helper.ObjectClient.Create(namespace, storeName, int32(replicaSize), tlsEnable, swiftAndKeystone)
assert.Nil(t, err)
})
t.Run("wait for RGWs to be running", func(t *testing.T) {
// check that ObjectStore is created
logger.Infof("Check that RGW pods are Running")
- for i := 0; i < 24 && k8sh.CheckPodCountAndState("rook-ceph-rgw", namespace, 1, "Running") == false; i++ {
+ for i := 0; i < 24 && k8sh.CheckPodCountAndState(rgwPrefix, namespace, 1, "Running") == false; i++ {
logger.Infof("(%d) RGW pod check sleeping for 5 seconds ...", i)
time.Sleep(5 * time.Second)
}
- assert.True(t, k8sh.CheckPodCountAndState("rook-ceph-rgw", namespace, replicaSize, "Running"))
+ assert.True(t, k8sh.CheckPodCountAndState(rgwPrefix, namespace, replicaSize, "Running"))
logger.Info("RGW pods are running")
- assert.NoError(t, k8sh.WaitForLabeledDeploymentsToBeReady("app=rook-ceph-rgw", namespace))
+ assert.NoError(t, k8sh.WaitForLabeledDeploymentsToBeReady("app="+rgwPrefix, namespace))
logger.Infof("Object store %q created successfully", storeName)
})
@@ -148,7 +153,7 @@ func createCephObjectStore(t *testing.T, helper *clients.TestClient, k8sh *utils
t.Run("verify RGW liveness probes show healthy", func(t *testing.T) {
err := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 90*time.Second, true, func(ctx context.Context) (done bool, err error) {
- deployName := "rook-ceph-rgw-" + storeName + "-a"
+ deployName := RgwServiceName(storeName) + "-a"
d, err := k8sh.Clientset.AppsV1().Deployments(namespace).Get(ctx, deployName, metav1.GetOptions{})
if err != nil {
logger.Infof("waiting for rgw deployment %q to be ready; failed to get deployment: %v", deployName, err)
@@ -164,7 +169,7 @@ func createCephObjectStore(t *testing.T, helper *clients.TestClient, k8sh *utils
})
t.Run("verify RGW service is up", func(t *testing.T) {
- assert.True(t, k8sh.IsServiceUp("rook-ceph-rgw-"+storeName, namespace))
+ assert.True(t, k8sh.IsServiceUp(RgwServiceName(storeName), namespace))
})
t.Run("check if the dashboard-admin user exists in all existing object stores", func(t *testing.T) {
diff --git a/tests/integration/ceph_cosi_test.go b/tests/integration/ceph_cosi_test.go
index 732771aef395..a4cfa8050941 100644
--- a/tests/integration/ceph_cosi_test.go
+++ b/tests/integration/ceph_cosi_test.go
@@ -38,7 +38,7 @@ func testCOSIDriver(s *suite.Suite, helper *clients.TestClient, k8sh *utils.K8sH
assert.NoError(t, err, "failed to create COSI controller")
})
- createCephObjectStore(s.T(), helper, k8sh, cephinstaller, namespace, objectStoreCOSI, 1, false)
+ createCephObjectStore(s.T(), helper, k8sh, cephinstaller, namespace, objectStoreCOSI, 1, false, false)
t.Run("Creating CephCOSIDriver CRD", func(t *testing.T) {
err := helper.COSIClient.CreateCOSI()
diff --git a/tests/integration/ceph_helm_test.go b/tests/integration/ceph_helm_test.go
index 4c64f1713cda..884573f60f29 100644
--- a/tests/integration/ceph_helm_test.go
+++ b/tests/integration/ceph_helm_test.go
@@ -105,5 +105,5 @@ func (h *HelmSuite) TestFileStoreOnRookInstalledViaHelm() {
func (h *HelmSuite) TestObjectStoreOnRookInstalledViaHelm() {
deleteStore := true
tls := false
- runObjectE2ETestLite(h.T(), h.helper, h.k8shelper, h.installer, h.settings.Namespace, "default", 3, deleteStore, tls)
+ runObjectE2ETestLite(h.T(), h.helper, h.k8shelper, h.installer, h.settings.Namespace, "default", 3, deleteStore, tls, false)
}
diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go
index ed859ce7d16c..d51dd59b4be5 100644
--- a/tests/integration/ceph_object_test.go
+++ b/tests/integration/ceph_object_test.go
@@ -19,6 +19,7 @@ package integration
import (
"context"
"encoding/json"
+ "k8s.io/apimachinery/pkg/api/errors"
"testing"
"time"
@@ -31,7 +32,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -95,8 +95,12 @@ func (s *ObjectSuite) TestWithTLS() {
}
tls := true
+ swiftAndKeystone := false
objectStoreServicePrefix = objectStoreServicePrefixUniq
- runObjectE2ETest(s.helper, s.k8sh, s.installer, &s.Suite, s.settings.Namespace, tls)
+ runObjectE2ETest(s.helper, s.k8sh, s.installer, &s.Suite, s.settings.Namespace, tls, swiftAndKeystone)
+ cleanUpTLS(s)
+}
+func cleanUpTLS(s *ObjectSuite) {
err := s.k8sh.Clientset.CoreV1().Secrets(s.settings.Namespace).Delete(context.TODO(), objectTLSSecretName, metav1.DeleteOptions{})
if err != nil {
if !errors.IsNotFound(err) {
@@ -112,22 +116,23 @@ func (s *ObjectSuite) TestWithoutTLS() {
}
tls := false
+ swiftAndKeystone := false
objectStoreServicePrefix = objectStoreServicePrefixUniq
- runObjectE2ETest(s.helper, s.k8sh, s.installer, &s.Suite, s.settings.Namespace, tls)
+ runObjectE2ETest(s.helper, s.k8sh, s.installer, &s.Suite, s.settings.Namespace, tls, swiftAndKeystone)
}
// Smoke Test for ObjectStore - Test check the following operations on ObjectStore in order
// Create object store, Create User, Connect to Object Store, Create Bucket, Read/Write/Delete to bucket,
// Check issues in MGRs, Delete Bucket and Delete user
// Test for ObjectStore with and without TLS enabled
-func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, s *suite.Suite, namespace string, tlsEnable bool) {
+func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, installer *installer.CephInstaller, s *suite.Suite, namespace string, tlsEnable bool, swiftAndKeystone bool) {
storeName := "test-store"
if tlsEnable {
storeName = objectStoreTLSName
}
logger.Infof("Running on Rook Cluster %s", namespace)
- createCephObjectStore(s.T(), helper, k8sh, installer, namespace, storeName, 3, tlsEnable)
+ createCephObjectStore(s.T(), helper, k8sh, installer, namespace, storeName, 3, tlsEnable, swiftAndKeystone)
// test that a second object store can be created (and deleted) while the first exists
s.T().Run("run a second object store", func(t *testing.T) {
@@ -135,14 +140,14 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, install
// The lite e2e test is perfect, as it only creates a cluster, checks that it is healthy,
// and then deletes it.
deleteStore := true
- runObjectE2ETestLite(t, helper, k8sh, installer, namespace, otherStoreName, 1, deleteStore, tlsEnable)
+ runObjectE2ETestLite(t, helper, k8sh, installer, namespace, otherStoreName, 1, deleteStore, tlsEnable, swiftAndKeystone)
})
// now test operation of the first object store
- testObjectStoreOperations(s, helper, k8sh, namespace, storeName)
+ testObjectStoreOperations(s, helper, k8sh, namespace, storeName, swiftAndKeystone)
bucketNotificationTestStoreName := "bucket-notification-" + storeName
- createCephObjectStore(s.T(), helper, k8sh, installer, namespace, bucketNotificationTestStoreName, 1, tlsEnable)
+ createCephObjectStore(s.T(), helper, k8sh, installer, namespace, bucketNotificationTestStoreName, 1, tlsEnable, swiftAndKeystone)
testBucketNotifications(s, helper, k8sh, namespace, bucketNotificationTestStoreName)
if !tlsEnable {
// TODO : need to fix COSI driver to support TLS
@@ -153,7 +158,7 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, install
}
}
-func testObjectStoreOperations(s *suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) {
+func testObjectStoreOperations(s *suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string, swiftAndKeystone bool) {
ctx := context.TODO()
clusterInfo := client.AdminTestClusterInfo(namespace)
t := s.T()
diff --git a/tests/integration/ceph_smoke_test.go b/tests/integration/ceph_smoke_test.go
index c3fa2ec6fe68..9a9c3c8021e6 100644
--- a/tests/integration/ceph_smoke_test.go
+++ b/tests/integration/ceph_smoke_test.go
@@ -129,7 +129,7 @@ func (s *SmokeSuite) TestObjectStorage_SmokeTest() {
storeName := "lite-store"
deleteStore := true
tls := false
- runObjectE2ETestLite(s.T(), s.helper, s.k8sh, s.installer, s.settings.Namespace, storeName, 2, deleteStore, tls)
+ runObjectE2ETestLite(s.T(), s.helper, s.k8sh, s.installer, s.settings.Namespace, storeName, 2, deleteStore, tls, false)
}
// Test to make sure all rook components are installed and Running
diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go
index 94b4fcb794a5..a31184e959de 100644
--- a/tests/integration/ceph_upgrade_test.go
+++ b/tests/integration/ceph_upgrade_test.go
@@ -315,7 +315,7 @@ func (s *UpgradeSuite) deployClusterforUpgrade(baseRookImage, objectUserID, preF
logger.Infof("Initializing object before the upgrade")
deleteStore := false
tls := false
- runObjectE2ETestLite(s.T(), s.helper, s.k8sh, s.installer, s.settings.Namespace, installer.ObjectStoreName, 1, deleteStore, tls)
+ runObjectE2ETestLite(s.T(), s.helper, s.k8sh, s.installer, s.settings.Namespace, installer.ObjectStoreName, 1, deleteStore, tls, false)
}
logger.Infof("Initializing object user before the upgrade")
@@ -380,7 +380,7 @@ func (s *UpgradeSuite) verifyOperatorImage(expectedImage string) {
// verify that the operator spec is updated
version, err := k8sutil.GetDeploymentImage(context.TODO(), s.k8sh.Clientset, systemNamespace, operatorContainer, operatorContainer)
assert.NoError(s.T(), err)
- assert.Equal(s.T(), "rook/ceph:"+expectedImage, version)
+ assert.Contains(s.T(), "docker.io/rook/ceph:"+expectedImage, version)
}
func (s *UpgradeSuite) verifyRookUpgrade(numOSDs int) {
diff --git a/tests/scripts/create-dev-cluster.sh b/tests/scripts/create-dev-cluster.sh
index cead784ce4ff..c47c58215834 100755
--- a/tests/scripts/create-dev-cluster.sh
+++ b/tests/scripts/create-dev-cluster.sh
@@ -112,7 +112,7 @@ setup_minikube_env() {
minikube_driver="$(get_minikube_driver)"
echo "Setting up minikube env for profile '$ROOK_PROFILE_NAME' (using $minikube_driver driver)"
$MINIKUBE delete
- $MINIKUBE start --disk-size="$MINIKUBE_DISK_SIZE" --extra-disks="$MINIKUBE_EXTRA_DISKS" --driver "$minikube_driver" -n "$MINIKUBE_NODES"
+ $MINIKUBE start --disk-size="$MINIKUBE_DISK_SIZE" --extra-disks="$MINIKUBE_EXTRA_DISKS" --driver "$minikube_driver" -n "$MINIKUBE_NODES" $ROOK_MINIKUBE_EXTRA_ARGS
eval "$($MINIKUBE docker-env)"
}
diff --git a/tests/scripts/csiaddons.sh b/tests/scripts/csiaddons.sh
index b3ebb9f845f8..401061da3305 100755
--- a/tests/scripts/csiaddons.sh
+++ b/tests/scripts/csiaddons.sh
@@ -16,7 +16,7 @@
set -xEo pipefail
-CSIADDONS_VERSION="v0.8.0"
+CSIADDONS_VERSION="v0.9.0"
CSIADDONS_CRD_NAME="csiaddonsnodes.csiaddons.openshift.io"
CSIADDONS_CONTAINER_NAME="csi-addons"
diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh
index 6159d09d4795..cd10c8bb1a14 100755
--- a/tests/scripts/github-action-helper.sh
+++ b/tests/scripts/github-action-helper.sh
@@ -204,7 +204,7 @@ function build_rook() {
tests/scripts/validate_modified_files.sh build
docker images
if [[ "$build_type" == "build" ]]; then
- docker tag "$(docker images | awk '/build-/ {print $1}')" rook/ceph:local-build
+ docker tag "$(docker images | awk '/build-/ {print $1}')" docker.io/rook/ceph:local-build
fi
}
@@ -246,7 +246,7 @@ function create_cluster_prerequisites() {
function deploy_manifest_with_local_build() {
sed -i 's/.*ROOK_CSI_ENABLE_NFS:.*/ ROOK_CSI_ENABLE_NFS: \"true\"/g' $1
if [[ "$USE_LOCAL_BUILD" != "false" ]]; then
- sed -i "s|image: rook/ceph:.*|image: rook/ceph:local-build|g" $1
+ sed -i "s|image: docker.io/rook/ceph:.*|image: docker.io/rook/ceph:local-build|g" $1
fi
if [[ "$ALLOW_LOOP_DEVICES" = "true" ]]; then
sed -i "s|ROOK_CEPH_ALLOW_LOOP_DEVICES: \"false\"|ROOK_CEPH_ALLOW_LOOP_DEVICES: \"true\"|g" $1
@@ -640,7 +640,7 @@ function test_multus_connections() {
function create_operator_toolbox() {
cd deploy/examples
- sed -i "s|image: rook/ceph:.*|image: rook/ceph:local-build|g" toolbox-operator-image.yaml
+ sed -i "s|image: docker.io/rook/ceph:.*|image: docker.io/rook/ceph:local-build|g" toolbox-operator-image.yaml
kubectl create -f toolbox-operator-image.yaml
}
diff --git a/tests/scripts/multus/host-cfg-ds.yaml b/tests/scripts/multus/host-cfg-ds.yaml
index ca95d4f66798..113db80e9fb1 100644
--- a/tests/scripts/multus/host-cfg-ds.yaml
+++ b/tests/scripts/multus/host-cfg-ds.yaml
@@ -29,12 +29,12 @@ spec:
terminationGracePeriodSeconds: 0 # allow updating/deleting immediately
containers:
- name: test
- image: quay.io/ceph/ceph:v18
+ image: jonlabelle/network-tools
env:
- name: IFACE_NAME
value: eth0 # IFACE_NAME
command:
- - bash
+ - sh
- -x
- -c
args: