From 80541a23c9959e066e0ea5fd512e61994ad49536 Mon Sep 17 00:00:00 2001
From: "yite.gu"
Date: Thu, 4 Jul 2024 17:26:33 +0800
Subject: [PATCH 1/2] csi: make kube apiserver qps configurable

This commit makes the QPS and burst that the CSI sidecars use while
communicating with the Kubernetes API server configurable, keeping the
existing values as the defaults.

Signed-off-by: yite.gu
---
 Documentation/Helm-Charts/operator-chart.md   |  2 ++
 .../charts/rook-ceph/templates/configmap.yaml |  6 +++++
 deploy/charts/rook-ceph/values.yaml           |  6 +++++
 deploy/examples/operator-openshift.yaml       |  6 +++++
 deploy/examples/operator.yaml                 |  6 +++++
 pkg/operator/ceph/csi/csi.go                  | 22 +++++++++++++++++
 pkg/operator/ceph/csi/spec.go                 |  2 ++
 .../csi-cephfsplugin-provisioner-dep.yaml     | 24 +++++++++++++++++++
 .../nfs/csi-nfsplugin-provisioner-dep.yaml    | 24 +++++++++++++++++++
 .../rbd/csi-rbdplugin-provisioner-dep.yaml    | 24 +++++++++++++++++++
 10 files changed, 122 insertions(+)

diff --git a/Documentation/Helm-Charts/operator-chart.md b/Documentation/Helm-Charts/operator-chart.md
index 0683e455ed15..4cb9cc560efd 100644
--- a/Documentation/Helm-Charts/operator-chart.md
+++ b/Documentation/Helm-Charts/operator-chart.md
@@ -100,6 +100,8 @@ The following table lists the configurable parameters of the rook-operator chart
 | `csi.forceCephFSKernelClient` | Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS you may want to disable this setting. However, this will cause an issue during upgrades with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html) | `true` |
 | `csi.grpcTimeoutInSeconds` | Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150 | `150` |
 | `csi.imagePullPolicy` | Image pull policy | `"IfNotPresent"` |
+| `csi.kubeApiBurst` | Burst to use while communicating with the kubernetes apiserver. | `nil` |
+| `csi.kubeApiQPS` | QPS to use while communicating with the kubernetes apiserver. | `nil` |
 | `csi.kubeletDirPath` | Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag) | `/var/lib/kubelet` |
 | `csi.logLevel` | Set logging level for cephCSI containers maintained by the cephCSI. Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. | `0` |
 | `csi.nfs.enabled` | Enable the nfs csi driver | `false` |
diff --git a/deploy/charts/rook-ceph/templates/configmap.yaml b/deploy/charts/rook-ceph/templates/configmap.yaml
index 2d502b8ce294..13c8e96c9235 100644
--- a/deploy/charts/rook-ceph/templates/configmap.yaml
+++ b/deploy/charts/rook-ceph/templates/configmap.yaml
@@ -251,3 +251,9 @@ data:
   CSI_RBD_ATTACH_REQUIRED: {{ .Values.csi.rbdAttachRequired | quote }}
   CSI_NFS_ATTACH_REQUIRED: {{ .Values.csi.nfsAttachRequired | quote }}
 {{- end }}
+{{- if .Values.csi.kubeApiBurst }}
+  CSI_KUBE_API_BURST: {{ .Values.csi.kubeApiBurst | quote }}
+{{- end }}
+{{- if .Values.csi.kubeApiQPS }}
+  CSI_KUBE_API_QPS: {{ .Values.csi.kubeApiQPS | quote }}
+{{- end }}
diff --git a/deploy/charts/rook-ceph/values.yaml b/deploy/charts/rook-ceph/values.yaml
index 4b48607d1791..88b1327b32ff 100644
--- a/deploy/charts/rook-ceph/values.yaml
+++ b/deploy/charts/rook-ceph/values.yaml
@@ -186,6 +186,12 @@ csi:
   # -- Allow starting an unsupported ceph-csi image
   allowUnsupportedVersion: false
 
+  # -- Burst to use while communicating with the kubernetes apiserver.
+  kubeApiBurst:
+
+  # -- QPS to use while communicating with the kubernetes apiserver.
+  kubeApiQPS:
+
   # -- The volume of the CephCSI RBD plugin DaemonSet
   csiRBDPluginVolume:
   #  - name: lib-modules
diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml
index 4082002fc011..1eade3604046 100644
--- a/deploy/examples/operator-openshift.yaml
+++ b/deploy/examples/operator-openshift.yaml
@@ -632,6 +632,12 @@ data:
   # requests:
   #   cpu: 200m
   #   memory: 128Mi
+
+  # (Optional) Burst to use while communicating with the kubernetes apiserver.
+  # CSI_KUBE_API_BURST: "10"
+
+  # (Optional) QPS to use while communicating with the kubernetes apiserver.
+  # CSI_KUBE_API_QPS: "5.0"
 ---
 # The deployment for the rook operator
 # OLM: BEGIN OPERATOR DEPLOYMENT
diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml
index 51d4dbd17b4b..df1bdb2a8fbd 100644
--- a/deploy/examples/operator.yaml
+++ b/deploy/examples/operator.yaml
@@ -557,6 +557,12 @@ data:
   # requests:
   #   cpu: 100m
   #   memory: 128Mi
+
+  # (Optional) Burst to use while communicating with the kubernetes apiserver.
+  # CSI_KUBE_API_BURST: "10"
+
+  # (Optional) QPS to use while communicating with the kubernetes apiserver.
+  # CSI_KUBE_API_QPS: "5.0"
 ---
 # OLM: BEGIN OPERATOR DEPLOYMENT
 apiVersion: apps/v1
diff --git a/pkg/operator/ceph/csi/csi.go b/pkg/operator/ceph/csi/csi.go
index 36690285e0bf..3cee5a5103c4 100644
--- a/pkg/operator/ceph/csi/csi.go
+++ b/pkg/operator/ceph/csi/csi.go
@@ -330,5 +330,27 @@ func (r *ReconcileCSI) setParams(ver *version.Info) error {
         CSIParam.EnableVolumeGroupSnapshot = false
     }
 
+    kubeApiBurst := k8sutil.GetValue(r.opConfig.Parameters, "CSI_KUBE_API_BURST", "")
+    CSIParam.KubeApiBurst = 0
+    if kubeApiBurst != "" {
+        k, err := strconv.ParseUint(kubeApiBurst, 10, 16)
+        if err != nil {
+            logger.Errorf("failed to parse CSI_KUBE_API_BURST. %v", err)
+        } else {
+            CSIParam.KubeApiBurst = uint16(k)
+        }
+    }
+
+    kubeApiQPS := k8sutil.GetValue(r.opConfig.Parameters, "CSI_KUBE_API_QPS", "")
+    CSIParam.KubeApiQPS = 0
+    if kubeApiQPS != "" {
+        k, err := strconv.ParseFloat(kubeApiQPS, 32)
+        if err != nil {
+            logger.Errorf("failed to parse CSI_KUBE_API_QPS. %v", err)
+        } else {
+            CSIParam.KubeApiQPS = float32(k)
+        }
+    }
+
     return nil
 }
diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index a02a98338b3d..e0c30e4489d6 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -91,6 +91,8 @@ type Param struct {
     CephFSLivenessMetricsPort   uint16
     CSIAddonsPort               uint16
     RBDLivenessMetricsPort      uint16
+    KubeApiBurst                uint16
+    KubeApiQPS                  float32
     LeaderElectionLeaseDuration time.Duration
     LeaderElectionRenewDeadline time.Duration
     LeaderElectionRetryPeriod   time.Duration
diff --git a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml
index de249e1c950f..336783be5378 100644
--- a/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/cephfs/csi-cephfsplugin-provisioner-dep.yaml
@@ -34,6 +34,12 @@ spec:
         - "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}"
         - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: /csi/csi-provisioner.sock
@@ -58,6 +64,12 @@ spec:
         {{ if .VolumeGroupSnapshotSupported }}
         - "--enable-volume-group-snapshots={{ .EnableVolumeGroupSnapshot }}"
         {{ end }}
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
@@ -79,6 +91,12 @@ spec:
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
         - "--handle-volume-inuse-error=false"
        - "--feature-gates=RecoverVolumeExpansionFailure=true"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
@@ -101,6 +119,12 @@ spec:
         - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
         - "--extra-create-metadata=true"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
diff --git a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml
index 84706dd7251c..a9e2124a355c 100644
--- a/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/nfs/csi-nfsplugin-provisioner-dep.yaml
@@ -33,6 +33,12 @@ spec:
         - "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}"
         - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: /csi/csi-provisioner.sock
@@ -54,6 +60,12 @@ spec:
         - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
         - "--extra-create-metadata=true"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
@@ -75,6 +87,12 @@ spec:
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
         - "--handle-volume-inuse-error=false"
         - "--feature-gates=RecoverVolumeExpansionFailure=true"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
@@ -96,6 +114,12 @@ spec:
         - "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}"
         - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
diff --git a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
index 2265b0182136..d735e7ab78ae 100644
--- a/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
+++ b/pkg/operator/ceph/csi/template/rbd/csi-rbdplugin-provisioner-dep.yaml
@@ -41,6 +41,12 @@ spec:
         {{ if .EnableCSITopology }}
         - "--feature-gates=Topology=true"
         {{ end }}
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
@@ -61,6 +67,12 @@ spec:
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
         - "--handle-volume-inuse-error=false"
         - "--feature-gates=RecoverVolumeExpansionFailure=true"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock
@@ -81,6 +93,12 @@ spec:
         - "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
         - "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
         - "--default-fstype=ext4"
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: /csi/csi-provisioner.sock
@@ -105,6 +123,12 @@ spec:
         {{ if .VolumeGroupSnapshotSupported }}
         - "--enable-volume-group-snapshots={{ .EnableVolumeGroupSnapshot }}"
         {{ end }}
+        {{ if .KubeApiBurst }}
+        - "--kube-api-burst={{ .KubeApiBurst }}"
+        {{ end }}
+        {{ if .KubeApiQPS }}
+        - "--kube-api-qps={{ .KubeApiQPS }}"
+        {{ end }}
         env:
         - name: ADDRESS
           value: unix:///csi/csi-provisioner.sock

From cb87c3a845f042378351dcc486a86801876ee398 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Wed, 17 Jul 2024 08:15:30 +0200
Subject: [PATCH 2/2] csi: fix template render in logRotation

The templateParam needs to be updated with the right values before
rendering the template.

Signed-off-by: Madhu Rajanna
---
 pkg/operator/ceph/csi/spec.go | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/pkg/operator/ceph/csi/spec.go b/pkg/operator/ceph/csi/spec.go
index de8bc9dd5143..8d2d7cc0e3dc 100644
--- a/pkg/operator/ceph/csi/spec.go
+++ b/pkg/operator/ceph/csi/spec.go
@@ -361,23 +361,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI
     tp.Param.MountCustomCephConf = CustomCSICephConfigExists
 
     if EnableRBD {
+        tp.CsiComponentName = nodePlugin
+        tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName)
         rbdPlugin, err = templateToDaemonSet("rbdplugin", RBDPluginTemplatePath, tp)
         if err != nil {
             return errors.Wrap(err, "failed to load rbdplugin template")
         }
         if tp.CSILogRotation {
-            tp.CsiComponentName = nodePlugin
-            tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName)
             applyLogrotateSidecar(&rbdPlugin.Spec.Template, "csi-rbd-daemonset-log-collector", LogrotateTemplatePath, tp)
         }
 
+        tp.CsiComponentName = controllerPlugin
         rbdProvisionerDeployment, err = templateToDeployment("rbd-provisioner", RBDProvisionerDepTemplatePath, tp)
         if err != nil {
             return errors.Wrap(err, "failed to load rbd provisioner deployment template")
         }
         if tp.CSILogRotation {
-            tp.CsiComponentName = controllerPlugin
-            tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName)
             applyLogrotateSidecar(&rbdProvisionerDeployment.Spec.Template, "csi-rbd-deployment-log-collector", LogrotateTemplatePath, tp)
         }
 
@@ -399,23 +398,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI
         })
     }
 
     if EnableCephFS {
+        tp.CsiComponentName = nodePlugin
+        tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName)
         cephfsPlugin, err = templateToDaemonSet("cephfsplugin", CephFSPluginTemplatePath, tp)
         if err != nil {
             return errors.Wrap(err, "failed to load CephFS plugin template")
         }
         if tp.CSILogRotation {
-            tp.CsiComponentName = nodePlugin
-            tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName)
             applyLogrotateSidecar(&cephfsPlugin.Spec.Template, "csi-cephfs-daemonset-log-collector", LogrotateTemplatePath, tp)
         }
 
+        tp.CsiComponentName = controllerPlugin
         cephfsProvisionerDeployment, err = templateToDeployment("cephfs-provisioner", CephFSProvisionerDepTemplatePath, tp)
         if err != nil {
             return errors.Wrap(err, "failed to load rbd provisioner deployment template")
         }
         if tp.CSILogRotation {
-            tp.CsiComponentName = controllerPlugin
-            tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName)
             applyLogrotateSidecar(&cephfsProvisionerDeployment.Spec.Template, "csi-cephfs-deployment-log-collector", LogrotateTemplatePath, tp)
         }
 
@@ -438,23 +436,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI
     }
 
     if EnableNFS {
+        tp.CsiComponentName = nodePlugin
+        tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName)
         nfsPlugin, err = templateToDaemonSet("nfsplugin", NFSPluginTemplatePath, tp)
         if err != nil {
             return errors.Wrap(err, "failed to load nfs plugin template")
         }
         if tp.CSILogRotation {
-            tp.CsiComponentName = nodePlugin
-            tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName)
             applyLogrotateSidecar(&nfsPlugin.Spec.Template, "csi-nfs-daemonset-log-collector", LogrotateTemplatePath, tp)
         }
 
+        tp.CsiComponentName = controllerPlugin
         nfsProvisionerDeployment, err = templateToDeployment("nfs-provisioner", NFSProvisionerDepTemplatePath, tp)
         if err != nil {
             return errors.Wrap(err, "failed to load nfs provisioner deployment template")
         }
         if tp.CSILogRotation {
-            tp.CsiComponentName = controllerPlugin
-            tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName)
             applyLogrotateSidecar(&nfsProvisionerDeployment.Spec.Template, "csi-nfs-deployment-log-collector", LogrotateTemplatePath, tp)
         }
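
---

For reference, a self-contained sketch of the CSI_KUBE_API_BURST / CSI_KUBE_API_QPS
handling that patch 1 adds to csi.go. The helper name parseKubeAPISettings is
illustrative only and is not a function in the patch; the real code also logs a
parse error, which is omitted here. Burst is parsed as an unsigned 16-bit integer
and QPS as a 32-bit float; an empty or malformed value leaves the zero default,
which the {{ if .KubeApiBurst }} / {{ if .KubeApiQPS }} guards in the provisioner
templates treat as "omit the flag".

    package main

    import (
        "fmt"
        "strconv"
    )

    // parseKubeAPISettings mirrors the operator's CSI_KUBE_API_* parsing:
    // burst must fit in 16 bits, QPS in a 32-bit float; invalid input keeps
    // the zero value, so the corresponding --kube-api-* flag is not emitted.
    func parseKubeAPISettings(burstRaw, qpsRaw string) (burst uint16, qps float32) {
        if burstRaw != "" {
            if b, err := strconv.ParseUint(burstRaw, 10, 16); err == nil {
                burst = uint16(b)
            }
        }
        if qpsRaw != "" {
            if q, err := strconv.ParseFloat(qpsRaw, 32); err == nil {
                qps = float32(q)
            }
        }
        return burst, qps
    }

    func main() {
        // The commented example values from operator.yaml above.
        burst, qps := parseKubeAPISettings("10", "5.0")
        fmt.Printf("--kube-api-burst=%d --kube-api-qps=%g\n", burst, qps)
    }

With the documented example values (CSI_KUBE_API_BURST: "10", CSI_KUBE_API_QPS: "5.0"),
each provisioner sidecar gains --kube-api-burst=10 and --kube-api-qps=5.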