Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Syncing latest changes from upstream master for rook #682

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions Documentation/Helm-Charts/operator-chart.md
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,8 @@ The following table lists the configurable parameters of the rook-operator chart
| `csi.forceCephFSKernelClient` | Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS you may want to disable this setting. However, this will cause an issue during upgrades with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html) | `true` |
| `csi.grpcTimeoutInSeconds` | Set gRPC timeout for CSI containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150 | `150` |
| `csi.imagePullPolicy` | Image pull policy | `"IfNotPresent"` |
| `csi.kubeApiBurst` | Burst to use while communicating with the Kubernetes API server. | `nil` |
| `csi.kubeApiQPS` | QPS to use while communicating with the Kubernetes API server. | `nil` |
| `csi.kubeletDirPath` | Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag) | `/var/lib/kubelet` |
| `csi.logLevel` | Set logging level for cephCSI containers maintained by the cephCSI. Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. | `0` |
| `csi.nfs.enabled` | Enable the nfs csi driver | `false` |
Expand Down
6 changes: 6 additions & 0 deletions deploy/charts/rook-ceph/templates/configmap.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -251,3 +251,9 @@ data:
CSI_RBD_ATTACH_REQUIRED: {{ .Values.csi.rbdAttachRequired | quote }}
CSI_NFS_ATTACH_REQUIRED: {{ .Values.csi.nfsAttachRequired | quote }}
{{- end }}
{{- if .Values.csi.kubeApiBurst }}
CSI_KUBE_API_BURST: {{ .Values.csi.kubeApiBurst | quote }}
{{- end }}
{{- if .Values.csi.kubeApiQPS }}
CSI_KUBE_API_QPS: {{ .Values.csi.kubeApiQPS | quote }}
{{- end }}
6 changes: 6 additions & 0 deletions deploy/charts/rook-ceph/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -186,6 +186,12 @@ csi:
# -- Allow starting an unsupported ceph-csi image
allowUnsupportedVersion: false

# -- Burst to use while communicating with the Kubernetes API server.
kubeApiBurst:

# -- QPS to use while communicating with the Kubernetes API server.
kubeApiQPS:

# -- The volume of the CephCSI RBD plugin DaemonSet
csiRBDPluginVolume:
# - name: lib-modules
Expand Down
6 changes: 6 additions & 0 deletions deploy/examples/operator-openshift.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -632,6 +632,12 @@ data:
# requests:
# cpu: 200m
# memory: 128Mi

# (Optional) Burst to use while communicating with the Kubernetes API server.
# CSI_KUBE_API_BURST: "10"

# (Optional) QPS to use while communicating with the Kubernetes API server.
# CSI_KUBE_API_QPS: "5.0"
---
# The deployment for the rook operator
# OLM: BEGIN OPERATOR DEPLOYMENT
Expand Down
6 changes: 6 additions & 0 deletions deploy/examples/operator.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -557,6 +557,12 @@ data:
# requests:
# cpu: 100m
# memory: 128Mi

# (Optional) Burst to use while communicating with the Kubernetes API server.
# CSI_KUBE_API_BURST: "10"

# (Optional) QPS to use while communicating with the Kubernetes API server.
# CSI_KUBE_API_QPS: "5.0"
---
# OLM: BEGIN OPERATOR DEPLOYMENT
apiVersion: apps/v1
Expand Down
22 changes: 22 additions & 0 deletions pkg/operator/ceph/csi/csi.go
Original file line number Diff line number Diff line change
Expand Up @@ -333,5 +333,27 @@ func (r *ReconcileCSI) setParams(ver *version.Info) error {
CSIParam.EnableVolumeGroupSnapshot = false
}

kubeApiBurst := k8sutil.GetValue(r.opConfig.Parameters, "CSI_KUBE_API_BURST", "")
CSIParam.KubeApiBurst = 0
if kubeApiBurst != "" {
k, err := strconv.ParseUint(kubeApiBurst, 10, 16)
if err != nil {
logger.Errorf("failed to parse CSI_KUBE_API_BURST. %v", err)
} else {
CSIParam.KubeApiBurst = uint16(k)
}
}

kubeApiQPS := k8sutil.GetValue(r.opConfig.Parameters, "CSI_KUBE_API_QPS", "")
CSIParam.KubeApiQPS = 0
if kubeApiQPS != "" {
k, err := strconv.ParseFloat(kubeApiQPS, 32)
if err != nil {
logger.Errorf("failed to parse CSI_KUBE_API_QPS. %v", err)
} else {
CSIParam.KubeApiQPS = float32(k)
}
}

return nil
}
23 changes: 11 additions & 12 deletions pkg/operator/ceph/csi/spec.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,8 @@ type Param struct {
CephFSLivenessMetricsPort uint16
CSIAddonsPort uint16
RBDLivenessMetricsPort uint16
KubeApiBurst uint16
KubeApiQPS float32
LeaderElectionLeaseDuration time.Duration
LeaderElectionRenewDeadline time.Duration
LeaderElectionRetryPeriod time.Duration
Expand Down Expand Up @@ -359,23 +361,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI
tp.Param.MountCustomCephConf = CustomCSICephConfigExists

if EnableRBD {
tp.CsiComponentName = nodePlugin
tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName)
rbdPlugin, err = templateToDaemonSet("rbdplugin", RBDPluginTemplatePath, tp)
if err != nil {
return errors.Wrap(err, "failed to load rbdplugin template")
}
if tp.CSILogRotation {
tp.CsiComponentName = nodePlugin
tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName)
applyLogrotateSidecar(&rbdPlugin.Spec.Template, "csi-rbd-daemonset-log-collector", LogrotateTemplatePath, tp)
}

tp.CsiComponentName = controllerPlugin
rbdProvisionerDeployment, err = templateToDeployment("rbd-provisioner", RBDProvisionerDepTemplatePath, tp)
if err != nil {
return errors.Wrap(err, "failed to load rbd provisioner deployment template")
}
if tp.CSILogRotation {
tp.CsiComponentName = controllerPlugin
tp.CsiLogRootPath = path.Join(csiRootPath, RBDDriverName)
applyLogrotateSidecar(&rbdProvisionerDeployment.Spec.Template, "csi-rbd-deployment-log-collector", LogrotateTemplatePath, tp)
}

Expand All @@ -397,23 +398,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI
})
}
if EnableCephFS {
tp.CsiComponentName = nodePlugin
tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName)
cephfsPlugin, err = templateToDaemonSet("cephfsplugin", CephFSPluginTemplatePath, tp)
if err != nil {
return errors.Wrap(err, "failed to load CephFS plugin template")
}
if tp.CSILogRotation {
tp.CsiComponentName = nodePlugin
tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName)
applyLogrotateSidecar(&cephfsPlugin.Spec.Template, "csi-cephfs-daemonset-log-collector", LogrotateTemplatePath, tp)
}

tp.CsiComponentName = controllerPlugin
cephfsProvisionerDeployment, err = templateToDeployment("cephfs-provisioner", CephFSProvisionerDepTemplatePath, tp)
if err != nil {
return errors.Wrap(err, "failed to load rbd provisioner deployment template")
}
if tp.CSILogRotation {
tp.CsiComponentName = controllerPlugin
tp.CsiLogRootPath = path.Join(csiRootPath, CephFSDriverName)
applyLogrotateSidecar(&cephfsProvisionerDeployment.Spec.Template, "csi-cephfs-deployment-log-collector", LogrotateTemplatePath, tp)
}

Expand All @@ -436,23 +436,22 @@ func (r *ReconcileCSI) startDrivers(ver *version.Info, ownerInfo *k8sutil.OwnerI
}

if EnableNFS {
tp.CsiComponentName = nodePlugin
tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName)
nfsPlugin, err = templateToDaemonSet("nfsplugin", NFSPluginTemplatePath, tp)
if err != nil {
return errors.Wrap(err, "failed to load nfs plugin template")
}
if tp.CSILogRotation {
tp.CsiComponentName = nodePlugin
tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName)
applyLogrotateSidecar(&nfsPlugin.Spec.Template, "csi-nfs-daemonset-log-collector", LogrotateTemplatePath, tp)
}

tp.CsiComponentName = controllerPlugin
nfsProvisionerDeployment, err = templateToDeployment("nfs-provisioner", NFSProvisionerDepTemplatePath, tp)
if err != nil {
return errors.Wrap(err, "failed to load nfs provisioner deployment template")
}
if tp.CSILogRotation {
tp.CsiComponentName = controllerPlugin
tp.CsiLogRootPath = path.Join(csiRootPath, NFSDriverName)
applyLogrotateSidecar(&nfsProvisionerDeployment.Spec.Template, "csi-nfs-deployment-log-collector", LogrotateTemplatePath, tp)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,12 @@ spec:
- "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}"
- "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: /csi/csi-provisioner.sock
Expand All @@ -58,6 +64,12 @@ spec:
{{ if .VolumeGroupSnapshotSupported }}
- "--enable-volume-group-snapshots={{ .EnableVolumeGroupSnapshot }}"
{{ end }}
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand All @@ -79,6 +91,12 @@ spec:
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
- "--handle-volume-inuse-error=false"
- "--feature-gates=RecoverVolumeExpansionFailure=true"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand All @@ -101,6 +119,12 @@ spec:
- "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
- "--extra-create-metadata=true"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,12 @@ spec:
- "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}"
- "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: /csi/csi-provisioner.sock
Expand All @@ -54,6 +60,12 @@ spec:
- "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
- "--extra-create-metadata=true"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand All @@ -75,6 +87,12 @@ spec:
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
- "--handle-volume-inuse-error=false"
- "--feature-gates=RecoverVolumeExpansionFailure=true"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand All @@ -96,6 +114,12 @@ spec:
- "--leader-election-lease-duration={{ .LeaderElectionLeaseDuration }}"
- "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,12 @@ spec:
{{ if .EnableCSITopology }}
- "--feature-gates=Topology=true"
{{ end }}
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand All @@ -61,6 +67,12 @@ spec:
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
- "--handle-volume-inuse-error=false"
- "--feature-gates=RecoverVolumeExpansionFailure=true"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand All @@ -81,6 +93,12 @@ spec:
- "--leader-election-renew-deadline={{ .LeaderElectionRenewDeadline }}"
- "--leader-election-retry-period={{ .LeaderElectionRetryPeriod }}"
- "--default-fstype=ext4"
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: /csi/csi-provisioner.sock
Expand All @@ -105,6 +123,12 @@ spec:
{{ if .VolumeGroupSnapshotSupported }}
- "--enable-volume-group-snapshots={{ .EnableVolumeGroupSnapshot }}"
{{ end }}
{{ if .KubeApiBurst }}
- "--kube-api-burst={{ .KubeApiBurst }}"
{{ end }}
{{ if .KubeApiQPS }}
- "--kube-api-qps={{ .KubeApiQPS }}"
{{ end }}
env:
- name: ADDRESS
value: unix:///csi/csi-provisioner.sock
Expand Down
Loading