diff --git a/.gitignore b/.gitignore
index 5e56e040..c40bea5e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
 /bin
+/tests/files/helm
+/tests/files/.dockerenv
+/tests/files/.kubeconfig
diff --git a/Makefile b/Makefile
index ad24b7a7..bb434e75 100644
--- a/Makefile
+++ b/Makefile
@@ -21,10 +21,10 @@ dockerbuildpush:
 
 .PHONY: cidockerbuildpush
 cidockerbuildpush:
-	docker build -t metalstack/csi-lvmplugin-provisioner:${DOCKER_TAG} . -f cmd/provisioner/Dockerfile
-	docker build -t metalstack/lvmplugin:${DOCKER_TAG} .
-	docker push metalstack/lvmplugin:${DOCKER_TAG}
-	docker push metalstack/csi-lvmplugin-provisioner:${DOCKER_TAG}
+	docker build -t metalstack/csi-lvmplugin-provisioner:${TEST_TAG} . -f cmd/provisioner/Dockerfile
+	docker build -t metalstack/lvmplugin:${TEST_TAG} .
+	docker push metalstack/lvmplugin:${TEST_TAG}
+	docker push metalstack/csi-lvmplugin-provisioner:${TEST_TAG}
 
 .PHONY: tests
 tests: | start-test build-provisioner build-plugin build-test do-test clean-test
diff --git a/cmd/lvmplugin/main.go b/cmd/lvmplugin/main.go
index a40a56cf..b3a0ed25 100644
--- a/cmd/lvmplugin/main.go
+++ b/cmd/lvmplugin/main.go
@@ -35,7 +35,7 @@ func init() {
 
 var (
 	endpoint          = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
-	driverName        = flag.String("drivername", "lvm.csi.k8s.io", "name of the driver")
+	driverName        = flag.String("drivername", "lvm.csi.metal-stack.io", "name of the driver")
 	nodeID            = flag.String("nodeid", "", "node id")
 	ephemeral         = flag.Bool("ephemeral", false, "publish volumes in ephemeral mode even if kubelet did not ask for it (only needed for Kubernetes 1.15)")
 	maxVolumesPerNode = flag.Int64("maxvolumespernode", 0, "limit of volumes per node")
diff --git a/examples/csi-app-mirror.yaml b/examples/csi-app-mirror.yaml
index bd740545..3cb77ffb 100644
--- a/examples/csi-app-mirror.yaml
+++ b/examples/csi-app-mirror.yaml
@@ -13,4 +13,4 @@ spec:
   volumes:
   - name: my-csi-volume-mirror
     persistentVolumeClaim:
-      claimName: csi-pvc-mirror # defined in csi-pvs.yaml
+      claimName: csi-pvc-mirror
diff --git a/examples/csi-app.yaml b/examples/csi-app.yaml
index d1881945..2337b8fe 100644
--- a/examples/csi-app.yaml
+++ b/examples/csi-app.yaml
@@ -13,4 +13,4 @@ spec:
   volumes:
   - name: my-csi-volume
     persistentVolumeClaim:
-      claimName: csi-pvc # defined in csi-pvs.yaml
+      claimName: csi-pvc
diff --git a/examples/csi-pvc-mirror.yaml b/examples/csi-pvc-mirror.yaml
index 4dd04109..86ed4527 100644
--- a/examples/csi-pvc-mirror.yaml
+++ b/examples/csi-pvc-mirror.yaml
@@ -8,4 +8,4 @@ spec:
   resources:
     requests:
       storage: 100Mi
-  storageClassName: csi-lvm-sc-mirror # defined in csi-setup.yaml
+  storageClassName: csi-lvm-sc-mirror
diff --git a/examples/csi-pvc.yaml b/examples/csi-pvc.yaml
index 424e135b..a7012541 100644
--- a/examples/csi-pvc.yaml
+++ b/examples/csi-pvc.yaml
@@ -8,4 +8,4 @@ spec:
   resources:
     requests:
       storage: 100Mi
-  storageClassName: csi-lvm-sc-linear # defined in csi-setup.yaml
+  storageClassName: csi-lvm-sc-linear
diff --git a/examples/csi-storageclass-linear.yaml b/examples/csi-storageclass-linear.yaml
index 22232a92..05cd1e92 100644
--- a/examples/csi-storageclass-linear.yaml
+++ b/examples/csi-storageclass-linear.yaml
@@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: csi-lvm-sc-linear
-provisioner: lvm.csi.k8s.io
+provisioner: lvm.csi.metal-stack.io
 reclaimPolicy: Delete
 volumeBindingMode: WaitForFirstConsumer
 allowVolumeExpansion: true
diff --git a/examples/csi-storageclass-mirror.yaml b/examples/csi-storageclass-mirror.yaml
index 081f088f..3825d150 100644
--- a/examples/csi-storageclass-mirror.yaml
+++ b/examples/csi-storageclass-mirror.yaml
@@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: csi-lvm-sc-mirror
-provisioner: lvm.csi.k8s.io
+provisioner: lvm.csi.metal-stack.io
 reclaimPolicy: Delete
 volumeBindingMode: WaitForFirstConsumer
 allowVolumeExpansion: true
diff --git a/examples/csi-storageclass-striped.yaml b/examples/csi-storageclass-striped.yaml
index 856c8d0b..5b07bf9e 100644
--- a/examples/csi-storageclass-striped.yaml
+++ b/examples/csi-storageclass-striped.yaml
@@ -2,7 +2,7 @@ apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
   name: csi-lvm-sc-striped
-provisioner: lvm.csi.k8s.io
+provisioner: lvm.csi.metal-stack.io
 reclaimPolicy: Delete
 volumeBindingMode: WaitForFirstConsumer
 allowVolumeExpansion: true
diff --git a/examples/mytest.yaml b/examples/mytest.yaml
index 9b7387ec..73b342e7 100644
--- a/examples/mytest.yaml
+++ b/examples/mytest.yaml
@@ -8,7 +8,7 @@ spec:
   resources:
     requests:
       storage: 100Mi
-  storageClassName: csi-lvm-sc-linear # defined in csi-setup.yaml
+  storageClassName: csi-lvm-sc-linear
 ---
 kind: Pod
 apiVersion: v1
@@ -25,4 +25,4 @@ spec:
   volumes:
   - name: my-csi-volume
     persistentVolumeClaim:
-      claimName: csi-pvc # defined in csi-pvs.yaml
+      claimName: csi-pvc
diff --git a/examples/rook/README.md b/examples/rook/README.md
deleted file mode 100644
index 353397da..00000000
--- a/examples/rook/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-### Example files for installation of rook on csi-lvm backed PVCs
-
-* install csi-driver-lvm (see ../../README.md)
-* install rook operator
-* install rook cluster
-* install rook storage classes
-* install basic psp for the mysql/wordpress example (if needed)
-* install a single mysql instance on a rook-ceph-block ReadWriteOnce PVC
-* install cephfs filesystem
-* install a wordpress deployment with 3 replicas on a cephfs shared ReadWriteMany filesytem
-
-
-```
-kubectl apply -f examples/rook/common.yaml
-kubectl apply -f examples/rook/operator.yaml
-kubectl apply -f examples/rook/cluster-on-lvm.yaml
-kubectl apply -f examples/rook/storageclass-rbd.yaml
-kubectl apply -f examples/rook/storageclass-cephfs.yaml
-kubectl apply -f examples/rook/psp.yaml
-kubectl apply -f examples/rook/mysql.yaml
-kubectl apply -f examples/rook/filesystem.yaml
-kubectl apply -f examples/rook/wordpress.yaml
-```
diff --git a/examples/rook/cluster-on-lvm.yaml b/examples/rook/cluster-on-lvm.yaml
deleted file mode 100644
index c08e1d61..00000000
--- a/examples/rook/cluster-on-lvm.yaml
+++ /dev/null
@@ -1,122 +0,0 @@
-#################################################################################################################
-# Define the settings for the rook-ceph cluster with common settings for a production cluster.
-# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
-# in this example. See the documentation for more details on storage settings available.
-
-# For example, to create the cluster:
-# kubectl create -f common.yaml
-# kubectl create -f operator.yaml
-# kubectl create -f cluster-on-pvc.yaml
-#################################################################################################################
-
-apiVersion: ceph.rook.io/v1
-kind: CephCluster
-metadata:
-  name: rook-ceph
-  namespace: rook-ceph
-spec:
-  dataDirHostPath: /var/lib/rook
-  mon:
-    count: 3
-    allowMultiplePerNode: false
-    # A volume claim template can be specified in which case new monitors (and
-    # monitors created during fail over) will construct a PVC based on the
-    # template for the monitor's primary storage. Changes to the template do not
-    # affect existing monitors. Log data is stored on the HostPath under
-    # dataDirHostPath. If no storage requirement is specified, a default storage
-    # size appropriate for monitor data will be used.
-    volumeClaimTemplate:
-      spec:
-        storageClassName: csi-lvm-sc-striped
-        resources:
-          requests:
-            storage: 10Gi
-  cephVersion:
-    #image: ceph/ceph:v15.2
-    image: ceph/ceph:v14.2.8
-    allowUnsupported: true
-  skipUpgradeChecks: false
-  continueUpgradeAfterChecksEvenIfNotHealthy: false
-  dashboard:
-    enabled: true
-    ssl: true
-  network:
-    hostNetwork: false
-  crashCollector:
-    disable: false
-  storage:
-    storageClassDeviceSets:
-    - name: set1
-      # The number of OSDs to create from this device set
-      count: 3
-      # IMPORTANT: If volumes specified by the storageClassName are not portable across nodes
-      # this needs to be set to false. For example, if using the local storage provisioner
-      # this should be false.
-      portable: false
-      # Certain storage class in the Cloud are slow
-      # Rook can configure the OSD running on PVC to accommodate that by tuning some of the Ceph internal
-      # Currently, "gp2" has been identified as such
-      tuneSlowDeviceClass: true
-      # Since the OSDs could end up on any node, an effort needs to be made to spread the OSDs
-      # across nodes as much as possible. Unfortunately the pod anti-affinity breaks down
-      # as soon as you have more than one OSD per node. If you have more OSDs than nodes, K8s may
-      # choose to schedule many of them on the same node. What we need is the Pod Topology
-      # Spread Constraints, which is alpha in K8s 1.16. This means that a feature gate must be
-      # enabled for this feature, and Rook also still needs to add support for this feature.
-      # Another approach for a small number of OSDs is to create a separate device set for each
-      # zone (or other set of nodes with a common label) so that the OSDs will end up on different
-      # nodes. This would require adding nodeAffinity to the placement here.
-      placement:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app
-                operator: In
-                values:
-                - rook-ceph-osd
-              - key: app
-                operator: In
-                values:
-                - rook-ceph-osd-prepare
-            topologyKey: "kubernetes.io/hostname"
-      resources:
-        # limits:
-        #   cpu: "500m"
-        #   memory: "4Gi"
-        # requests:
-        #   cpu: "500m"
-        #   memory: "4Gi"
-      volumeClaimTemplates:
-      - metadata:
-          name: data
-          # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph
-          # annotations:
-          #   crushDeviceClass: hybrid
-        spec:
-          resources:
-            requests:
-              storage: 100Gi
-          # IMPORTANT: Change the storage class depending on your environment (e.g.
local-storage, gp2) - storageClassName: csi-lvm-sc-striped - volumeMode: Block - accessModes: - - ReadWriteOnce - # dedicated block device to store bluestore database (block.db) - # - metadata: - # name: metadata - # spec: - # resources: - # requests: - # # Find the right size https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#sizing - # storage: 5Gi - # # IMPORTANT: Change the storage class depending on your environment (e.g. local-storage, gp2) - # storageClassName: csi-lvm-sc-striped - # volumeMode: Block - # accessModes: - # - ReadWriteOnce - disruptionManagement: - managePodBudgets: false - osdMaintenanceTimeout: 30 - manageMachineDisruptionBudgets: false - machineDisruptionBudgetNamespace: openshift-machine-api diff --git a/examples/rook/common.yaml b/examples/rook/common.yaml deleted file mode 100644 index ef18ca12..00000000 --- a/examples/rook/common.yaml +++ /dev/null @@ -1,1710 +0,0 @@ -################################################################################################################### -# Create the common resources that are necessary to start the operator and the ceph cluster. -# These resources *must* be created before the operator.yaml and cluster.yaml or their variants. -# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace. -# -# If the operator needs to manage multiple clusters (in different namespaces), see the section below -# for "cluster-specific resources". The resources below that section will need to be created for each namespace -# where the operator needs to manage the cluster. The resources above that section do not be created again. -# -# Most of the sections are prefixed with a 'OLM' keyword which is used to build our CSV for an OLM (Operator Life Cycle manager) -################################################################################################################### - -# Namespace where the operator and other rook resources are created -apiVersion: v1 -kind: Namespace -metadata: - name: rook-ceph -# OLM: BEGIN CEPH CRD -# The CRD declarations ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephclusters.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephCluster - listKind: CephClusterList - plural: cephclusters - singular: cephcluster - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - annotations: {} - cephVersion: - properties: - allowUnsupported: - type: boolean - image: - type: string - dashboard: - properties: - enabled: - type: boolean - urlPrefix: - type: string - port: - type: integer - minimum: 0 - maximum: 65535 - ssl: - type: boolean - dataDirHostPath: - pattern: ^/(\S+) - type: string - disruptionManagement: - properties: - machineDisruptionBudgetNamespace: - type: string - managePodBudgets: - type: boolean - osdMaintenanceTimeout: - type: integer - manageMachineDisruptionBudgets: - type: boolean - skipUpgradeChecks: - type: boolean - continueUpgradeAfterChecksEvenIfNotHealthy: - type: boolean - mon: - properties: - allowMultiplePerNode: - type: boolean - count: - maximum: 9 - minimum: 0 - type: integer - volumeClaimTemplate: {} - mgr: - properties: - modules: - items: - properties: - name: - type: string - enabled: - type: boolean - network: - properties: - hostNetwork: - type: boolean - provider: - type: string - selectors: {} - storage: - properties: - disruptionManagement: - properties: - machineDisruptionBudgetNamespace: - 
type: string - managePodBudgets: - type: boolean - osdMaintenanceTimeout: - type: integer - manageMachineDisruptionBudgets: - type: boolean - useAllNodes: - type: boolean - nodes: - items: - properties: - name: - type: string - config: - properties: - metadataDevice: - type: string - storeType: - type: string - pattern: ^(filestore|bluestore)$ - databaseSizeMB: - type: string - walSizeMB: - type: string - journalSizeMB: - type: string - osdsPerDevice: - type: string - encryptedDevice: - type: string - pattern: ^(true|false)$ - useAllDevices: - type: boolean - deviceFilter: - type: string - devicePathFilter: - type: string - devices: - type: array - items: - properties: - name: - type: string - config: {} - resources: {} - type: array - useAllDevices: - type: boolean - deviceFilter: - type: string - devicePathFilter: - type: string - config: {} - storageClassDeviceSets: {} - monitoring: - properties: - enabled: - type: boolean - rulesNamespace: - type: string - rbdMirroring: - properties: - workers: - type: integer - removeOSDsIfOutAndSafeToRemove: - type: boolean - external: - properties: - enable: - type: boolean - placement: {} - resources: {} - # somehow this is breaking the status, but let's keep this here so we don't forget it once we move to controller-runtime - # subresources: - # status: {} - additionalPrinterColumns: - - name: DataDirHostPath - type: string - description: Directory used on the K8s nodes - JSONPath: .spec.dataDirHostPath - - name: MonCount - type: string - description: Number of MONs - JSONPath: .spec.mon.count - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - - name: Phase - type: string - description: Phase - JSONPath: .status.phase - - name: Message - type: string - description: Message - JSONPath: .status.message - - name: Health - type: string - description: Ceph Health - JSONPath: .status.ceph.health -# OLM: END CEPH CRD -# OLM: BEGIN CEPH CLIENT CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephclients.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephClient - listKind: CephClientList - plural: cephclients - singular: cephclient - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - caps: - type: object - subresources: - status: {} -# OLM: END CEPH CLIENT CRD -# OLM: BEGIN CEPH FS CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephfilesystems.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephFilesystem - listKind: CephFilesystemList - plural: cephfilesystems - singular: cephfilesystem - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - metadataServer: - properties: - activeCount: - minimum: 1 - maximum: 10 - type: integer - activeStandby: - type: boolean - annotations: {} - placement: {} - resources: {} - metadataPool: - properties: - failureDomain: - type: string - replicated: - properties: - size: - minimum: 0 - maximum: 10 - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - minimum: 0 - maximum: 10 - type: integer - codingChunks: - minimum: 0 - maximum: 10 - type: integer - dataPools: - type: array - items: - properties: - failureDomain: - type: string - replicated: - properties: - size: - minimum: 0 - maximum: 10 - type: integer - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - minimum: 0 - maximum: 10 - type: 
integer - codingChunks: - minimum: 0 - maximum: 10 - type: integer - preservePoolsOnDelete: - type: boolean - additionalPrinterColumns: - - name: ActiveMDS - type: string - description: Number of desired active MDS daemons - JSONPath: .spec.metadataServer.activeCount - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - subresources: - status: {} -# OLM: END CEPH FS CRD -# OLM: BEGIN CEPH NFS CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephnfses.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephNFS - listKind: CephNFSList - plural: cephnfses - singular: cephnfs - shortNames: - - nfs - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - rados: - properties: - pool: - type: string - namespace: - type: string - server: - properties: - active: - type: integer - annotations: {} - placement: {} - resources: {} - subresources: - status: {} -# OLM: END CEPH NFS CRD -# OLM: BEGIN CEPH OBJECT STORE CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectstores.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStore - listKind: CephObjectStoreList - plural: cephobjectstores - singular: cephobjectstore - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - gateway: - properties: - type: - type: string - sslCertificateRef: {} - port: - type: integer - minimum: 1 - maximum: 65535 - securePort: {} - instances: - type: integer - annotations: {} - placement: {} - resources: {} - metadataPool: - properties: - failureDomain: - type: string - replicated: - properties: - size: - type: integer - erasureCoded: - properties: - dataChunks: - type: integer - codingChunks: - type: integer - dataPool: - properties: - failureDomain: - type: string - replicated: - properties: - size: - type: integer - erasureCoded: - properties: - dataChunks: - type: integer - codingChunks: - type: integer - preservePoolsOnDelete: - type: boolean - subresources: - status: {} -# OLM: END CEPH OBJECT STORE CRD -# OLM: BEGIN CEPH OBJECT STORE USERS CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephobjectstoreusers.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephObjectStoreUser - listKind: CephObjectStoreUserList - plural: cephobjectstoreusers - singular: cephobjectstoreuser - shortNames: - - rcou - - objectuser - scope: Namespaced - version: v1 - subresources: - status: {} -# OLM: END CEPH OBJECT STORE USERS CRD -# OLM: BEGIN CEPH BLOCK POOL CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: cephblockpools.ceph.rook.io -spec: - group: ceph.rook.io - names: - kind: CephBlockPool - listKind: CephBlockPoolList - plural: cephblockpools - singular: cephblockpool - scope: Namespaced - version: v1 - validation: - openAPIV3Schema: - properties: - spec: - properties: - failureDomain: - type: string - replicated: - properties: - size: - type: integer - minimum: 0 - maximum: 9 - targetSizeRatio: - type: number - requireSafeReplicaSize: - type: boolean - erasureCoded: - properties: - dataChunks: - type: integer - minimum: 0 - maximum: 9 - codingChunks: - type: integer - minimum: 0 - maximum: 9 - subresources: - status: {} -# OLM: END CEPH BLOCK POOL CRD -# OLM: BEGIN CEPH VOLUME POOL CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition 
-metadata: - name: volumes.rook.io -spec: - group: rook.io - names: - kind: Volume - listKind: VolumeList - plural: volumes - singular: volume - shortNames: - - rv - scope: Namespaced - version: v1alpha2 - subresources: - status: {} -# OLM: END CEPH VOLUME POOL CRD -# OLM: BEGIN OBJECTBUCKET CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: objectbuckets.objectbucket.io -spec: - group: objectbucket.io - versions: - - name: v1alpha1 - served: true - storage: true - names: - kind: ObjectBucket - listKind: ObjectBucketList - plural: objectbuckets - singular: objectbucket - shortNames: - - ob - - obs - scope: Cluster - subresources: - status: {} -# OLM: END OBJECTBUCKET CRD -# OLM: BEGIN OBJECTBUCKETCLAIM CRD ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: objectbucketclaims.objectbucket.io -spec: - versions: - - name: v1alpha1 - served: true - storage: true - group: objectbucket.io - names: - kind: ObjectBucketClaim - listKind: ObjectBucketClaimList - plural: objectbucketclaims - singular: objectbucketclaim - shortNames: - - obc - - obcs - scope: Namespaced - subresources: - status: {} -# OLM: END OBJECTBUCKETCLAIM CRD -# OLM: BEGIN OBJECTBUCKET ROLEBINDING ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-object-bucket -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-object-bucket -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph -# OLM: END OBJECTBUCKET ROLEBINDING -# OLM: BEGIN OPERATOR ROLE ---- -# The cluster role for managing all the cluster-specific resources in a namespace -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: rook-ceph-cluster-mgmt - labels: - operator: rook - storage-backend: ceph -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true" -rules: [] ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: rook-ceph-cluster-mgmt-rules - labels: - operator: rook - storage-backend: ceph - rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true" -rules: -- apiGroups: - - "" - resources: - - secrets - - pods - - pods/log - - services - - configmaps - verbs: - - get - - list - - watch - - patch - - create - - update - - delete -- apiGroups: - - apps - resources: - - deployments - - daemonsets - verbs: - - get - - list - - watch - - create - - update - - delete ---- -# The role for the operator to manage resources in its own namespace -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: rook-ceph-system - namespace: rook-ceph - labels: - operator: rook - storage-backend: ceph -rules: -- apiGroups: - - "" - resources: - - pods - - configmaps - - services - verbs: - - get - - list - - watch - - patch - - create - - update - - delete -- apiGroups: - - apps - resources: - - daemonsets - - statefulsets - - deployments - verbs: - - get - - list - - watch - - create - - update - - delete ---- -# The cluster role for managing the Rook CRDs -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true" -rules: [] ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - 
name: rook-ceph-global-rules - labels: - operator: rook - storage-backend: ceph - rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true" -rules: -- apiGroups: - - "" - resources: - # Pod access is needed for fencing - - pods - # Node access is needed for determining nodes where mons should run - - nodes - - nodes/proxy - - services - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - # PVs and PVCs are managed by the Rook provisioner - - persistentvolumes - - persistentvolumeclaims - - endpoints - verbs: - - get - - list - - watch - - patch - - create - - update - - delete -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch -- apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - ceph.rook.io - resources: - - "*" - verbs: - - "*" -- apiGroups: - - rook.io - resources: - - "*" - verbs: - - "*" -- apiGroups: - - policy - - apps - resources: - # This is for the clusterdisruption controller - - poddisruptionbudgets - # This is for both clusterdisruption and nodedrain controllers - - deployments - - replicasets - verbs: - - "*" -- apiGroups: - - healthchecking.openshift.io - resources: - - machinedisruptionbudgets - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - machine.openshift.io - resources: - - machines - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - storage.k8s.io - resources: - - csidrivers - verbs: - - create ---- -# Aspects of ceph-mgr that require cluster-wide access -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr-cluster - labels: - operator: rook - storage-backend: ceph -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true" -rules: [] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr-cluster-rules - labels: - operator: rook - storage-backend: ceph - rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true" -rules: -- apiGroups: - - "" - resources: - - configmaps - - nodes - - nodes/proxy - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - list - - get - - watch ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-object-bucket - labels: - operator: rook - storage-backend: ceph - rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true" -rules: -- apiGroups: - - "" - verbs: - - "*" - resources: - - secrets - - configmaps -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch -- apiGroups: - - "objectbucket.io" - verbs: - - "*" - resources: - - "*" -# OLM: END OPERATOR ROLE -# OLM: BEGIN SERVICE ACCOUNT SYSTEM ---- -# The rook system service account used by the operator, agent, and discovery pods -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-system - namespace: rook-ceph - labels: - operator: rook - storage-backend: ceph -# imagePullSecrets: -# - name: my-registry-secret - -# OLM: END SERVICE ACCOUNT SYSTEM -# OLM: BEGIN OPERATOR ROLEBINDING ---- -# Grant the operator, agent, and discovery agents access to resources in the namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-system - namespace: rook-ceph - labels: - operator: rook - storage-backend: ceph 
-roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-system -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph ---- -# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-global -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph -# OLM: END OPERATOR ROLEBINDING -################################################################################################################# -# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph" -# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles -# and bindings accordingly. -################################################################################################################# -# Service account for the Ceph OSDs. Must exist and cannot be renamed. -# OLM: BEGIN SERVICE ACCOUNT OSD ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-osd - namespace: rook-ceph -# imagePullSecrets: -# - name: my-registry-secret - -# OLM: END SERVICE ACCOUNT OSD -# OLM: BEGIN SERVICE ACCOUNT MGR ---- -# Service account for the Ceph Mgr. Must exist and cannot be renamed. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-mgr - namespace: rook-ceph -# imagePullSecrets: -# - name: my-registry-secret - -# OLM: END SERVICE ACCOUNT MGR -# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph -# OLM: END CMD REPORTER SERVICE ACCOUNT -# OLM: BEGIN CLUSTER ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-osd - namespace: rook-ceph -rules: -- apiGroups: [""] - resources: ["configmaps"] - verbs: [ "get", "list", "watch", "create", "update", "delete" ] -- apiGroups: ["ceph.rook.io"] - resources: ["cephclusters", "cephclusters/finalizers"] - verbs: [ "get", "list", "create", "update", "delete" ] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-osd -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list ---- -# Aspects of ceph-mgr that require access to the system namespace -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr-system -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true" -rules: [] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr-system-rules - labels: - rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true" -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -# Aspects of ceph-mgr that operate within the cluster's namespace -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr - namespace: rook-ceph -rules: -- apiGroups: - - "" - resources: - - pods - - services - - pods/log - verbs: - - get - - list - - watch - - delete -- apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list - - watch - - create - - update - - delete -- apiGroups: - - ceph.rook.io - 
resources: - - "*" - verbs: - - "*" -# OLM: END CLUSTER ROLE -# OLM: BEGIN CMD REPORTER ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph -rules: -- apiGroups: - - "" - resources: - - pods - - configmaps - verbs: - - get - - list - - watch - - create - - update - - delete -# OLM: END CMD REPORTER ROLE -# OLM: BEGIN CLUSTER ROLEBINDING ---- -# Allow the operator to create resources in this cluster's namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-cluster-mgmt - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: -- kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph ---- -# Allow the osd pods in this namespace to work with configmaps -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-osd - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: -- kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph ---- -# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-mgr -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph ---- -# Allow the ceph mgr to access the rook system resources necessary for the mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr-system - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-system -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph ---- -# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-mgr-cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-cluster -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph - ---- -# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-osd -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-osd -subjects: -- kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph - -# OLM: END CLUSTER ROLEBINDING -# OLM: BEGIN CMD REPORTER ROLEBINDING ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: -- kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph -# OLM: END CMD REPORTER ROLEBINDING -################################################################################################################# -# Beginning of pod security policy resources. The example will assume the cluster will be created in the -# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify -# the roles and bindings accordingly. 
-################################################################################################################# -# OLM: BEGIN CLUSTER POD SECURITY POLICY ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: rook-privileged -spec: - privileged: true - allowedCapabilities: - # required by CSI - - SYS_ADMIN - # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group - fsGroup: - rule: RunAsAny - # runAsUser, supplementalGroups - Rook needs to run some pods as root - # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - # seLinux - seLinux context is unknown ahead of time; set if this is well-known - seLinux: - rule: RunAsAny - volumes: - # recommended minimum set - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - projected - # required for Rook - - hostPath - - flexVolume - # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known - # directory-based OSDs make this hard to nail down - # allowedHostPaths: - # - pathPrefix: "/run/udev" # for OSD prep - # readOnly: false - # - pathPrefix: "/dev" # for OSD prep - # readOnly: false - # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to - # readOnly: false - # Ceph requires host IPC for setting up encrypted devices - hostIPC: true - # Ceph OSDs need to share the same PID namespace - hostPID: true - # hostNetwork can be set to 'false' if host networking isn't used - hostNetwork: true - hostPorts: - # Ceph messenger protocol v1 - - min: 6789 - max: 6790 # <- support old default port - # Ceph messenger protocol v2 - - min: 3300 - max: 3300 - # Ceph RADOS ports for OSDs, MDSes - - min: 6800 - max: 7300 - # # Ceph dashboard port HTTP (not recommended) - # - min: 7000 - # max: 7000 - # Ceph dashboard port HTTPS - - min: 8443 - max: 8443 - # Ceph mgr Prometheus Metrics - - min: 9283 - max: 9283 -# OLM: END CLUSTER POD SECURITY POLICY -# OLM: BEGIN POD SECURITY POLICY BINDINGS ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: 'psp:rook' -rules: - - apiGroups: - - policy - resources: - - podsecuritypolicies - resourceNames: - - rook-privileged - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-ceph-system-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-default-psp - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: -- kind: ServiceAccount - name: default - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-osd-psp - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: -- kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-mgr-psp - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: -- kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding 
-metadata: - name: rook-ceph-cmd-reporter-psp - namespace: rook-ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: -- kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph -# OLM: END CLUSTER POD SECURITY POLICY BINDINGS -# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph -# OLM: END CSI CEPHFS SERVICE ACCOUNT -# OLM: BEGIN CSI CEPHFS ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: rook-ceph - name: cephfs-external-provisioner-cfg -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "create", "delete"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -# OLM: END CSI CEPHFS ROLE -# OLM: BEGIN CSI CEPHFS ROLEBINDING ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role-cfg - namespace: rook-ceph -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph -roleRef: - kind: Role - name: cephfs-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI CEPHFS ROLEBINDING -# OLM: BEGIN CSI CEPHFS CLUSTER ROLE ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true" -rules: [] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin-rules - labels: - rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true" -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-external-provisioner-runner -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true" -rules: [] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-external-provisioner-runner-rules - labels: - rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true" -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", 
"patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] -# OLM: END CSI CEPHFS CLUSTER ROLE -# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-cephfs-plugin-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-cephfs-provisioner-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph -roleRef: - kind: ClusterRole - name: cephfs-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph -roleRef: - kind: ClusterRole - name: cephfs-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI CEPHFS CLUSTER ROLEBINDING -# OLM: BEGIN CSI RBD SERVICE ACCOUNT ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph -# OLM: END CSI RBD SERVICE ACCOUNT -# OLM: BEGIN CSI RBD ROLE ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: rook-ceph - name: rbd-external-provisioner-cfg -rules: - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -# OLM: END CSI RBD ROLE -# OLM: BEGIN CSI RBD ROLEBINDING ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role-cfg - namespace: rook-ceph -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph -roleRef: - kind: Role - name: rbd-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI RBD ROLEBINDING -# OLM: BEGIN CSI RBD CLUSTER ROLE ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true" -rules: [] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin-rules - labels: - rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true" -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update"] 
- - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list"] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-external-provisioner-runner -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true" -rules: [] ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-external-provisioner-runner-rules - labels: - rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true" -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "list", "watch", "delete", "get", "update"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] -# OLM: END CSI RBD CLUSTER ROLE -# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-rbd-plugin-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rook-csi-rbd-provisioner-sa-psp -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph -roleRef: - kind: ClusterRole - name: rbd-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph -roleRef: - kind: ClusterRole - name: rbd-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io -# OLM: END CSI RBD CLUSTER ROLEBINDING diff --git a/examples/rook/filesystem.yaml b/examples/rook/filesystem.yaml deleted file mode 100644 index 81e48890..00000000 --- 
a/examples/rook/filesystem.yaml +++ /dev/null @@ -1,86 +0,0 @@ -################################################################################################################# -# Create a filesystem with settings with replication enabled for a production environment. -# A minimum of 3 OSDs on different nodes are required in this example. -# kubectl create -f filesystem.yaml -################################################################################################################# - -apiVersion: ceph.rook.io/v1 -kind: CephFilesystem -metadata: - name: myfs - namespace: rook-ceph -spec: - # The metadata pool spec. Must use replication. - metadataPool: - replicated: - size: 3 - requireSafeReplicaSize: true - # The list of data pool specs. Can use replication or erasure coding. - dataPools: - - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #targetSizeRatio: .5 - # Whether to preserve metadata and data pools on filesystem deletion - preservePoolsOnDelete: true - # The metadata service (mds) configuration - metadataServer: - # The number of active MDS instances - activeCount: 1 - # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. - # If false, standbys will be available, but will not have a warm cache. - activeStandby: true - # The affinity rules to apply to the mds deployment - placement: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: role - # operator: In - # values: - # - mds-node - # tolerations: - # - key: mds-node - # operator: Exists - # podAffinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-mds - # topologyKey: kubernetes.io/hostname will place MDS across different hosts - topologyKey: kubernetes.io/hostname - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - rook-ceph-mds - # topologyKey: */zone can be used to spread MDS across different AZ - # Use in k8s cluster if your cluster is v1.16 or lower - # Use in k8s cluster is v1.17 or upper - topologyKey: topology.kubernetes.io/zone - # A key/value list of annotations - annotations: - # key: value - resources: - # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory - # limits: - # cpu: "500m" - # memory: "1024Mi" - # requests: - # cpu: "500m" - # memory: "1024Mi" - # priorityClassName: my-priority-class diff --git a/examples/rook/mysql.yaml b/examples/rook/mysql.yaml deleted file mode 100644 index 89cd906a..00000000 --- a/examples/rook/mysql.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: wordpress-mysql - labels: - app: wordpress -spec: - ports: - - port: 3306 - selector: - app: wordpress - tier: mysql - clusterIP: None ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: mysql-pv-claim - labels: - app: wordpress -spec: - storageClassName: 
rook-ceph-block - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: wordpress-mysql - labels: - app: wordpress - tier: mysql -spec: - selector: - matchLabels: - app: wordpress - tier: mysql - strategy: - type: Recreate - template: - metadata: - labels: - app: wordpress - tier: mysql - spec: - serviceAccountName: example - serviceAccount: example - containers: - - image: mysql:5.6 - name: mysql - env: - - name: MYSQL_ROOT_PASSWORD - value: changeme - ports: - - containerPort: 3306 - name: mysql - volumeMounts: - - name: mysql-persistent-storage - mountPath: /var/lib/mysql - volumes: - - name: mysql-persistent-storage - persistentVolumeClaim: - claimName: mysql-pv-claim diff --git a/examples/rook/operator.yaml b/examples/rook/operator.yaml deleted file mode 100644 index 3e8d94d4..00000000 --- a/examples/rook/operator.yaml +++ /dev/null @@ -1,337 +0,0 @@ -################################################################################################################# -# The deployment for the rook operator -# Contains the common settings for most Kubernetes deployments. -# For example, to create the rook-ceph cluster: -# kubectl create -f common.yaml -# kubectl create -f operator.yaml -# kubectl create -f cluster.yaml -# -# Also see other operator sample files for variations of operator.yaml: -# - operator-openshift.yaml: Common settings for running in OpenShift -################################################################################################################# -# Rook Ceph Operator Config -# Use this ConfigMap to override operator configurations -# Precedence will be given to this config in case -# Env Var also exists for the same -# -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-ceph-operator-config - # should be in the namespace of the operator - namespace: rook-ceph -data: - # # (Optional) Ceph Provisioner NodeAffinity. - # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format. - # # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # CSI_PROVISIONER_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # # (Optional) Ceph CSI plugin NodeAffinity. - # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" - # # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format. - # # CSI plugins need to be started on all the nodes where the clients need to mount the storage. 
- # CSI_PLUGIN_TOLERATIONS: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists ---- -# OLM: BEGIN OPERATOR DEPLOYMENT -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rook-ceph-operator - namespace: rook-ceph - labels: - operator: rook - storage-backend: ceph -spec: - selector: - matchLabels: - app: rook-ceph-operator - replicas: 1 - template: - metadata: - labels: - app: rook-ceph-operator - spec: - serviceAccountName: rook-ceph-system - containers: - - name: rook-ceph-operator - image: rook/ceph:v1.2.7 - imagePullPolicy: Always - args: ["ceph", "operator"] - volumeMounts: - - mountPath: /var/lib/rook - name: rook-config - - mountPath: /etc/ceph - name: default-config-dir - env: - # If the operator should only watch for cluster CRDs in the same namespace, set this to "true". - # If this is not set to true, the operator will watch for cluster CRDs in all namespaces. - - name: ROOK_CURRENT_NAMESPACE_ONLY - value: "false" - - name: ROOK_LV_BACKED_PV - value: "true" - # To disable RBAC, uncomment the following: - # - name: RBAC_ENABLED - # value: "false" - # Rook Agent toleration. Will tolerate all taints with all keys. - # Choose between NoSchedule, PreferNoSchedule and NoExecute: - # - name: AGENT_TOLERATION - # value: "NoSchedule" - # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate - # - name: AGENT_TOLERATION_KEY - # value: "" - # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format. - # - name: AGENT_TOLERATIONS - # value: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) Rook Agent priority class name to set on the pod(s) - # - name: AGENT_PRIORITY_CLASS_NAME - # value: "" - # (Optional) Rook Agent NodeAffinity. - # - name: AGENT_NODE_AFFINITY - # value: "role=storage-node; storage=rook,ceph" - # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`. - # `Any` uses Ceph admin credentials by default/fallback. - # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and - # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name. - # to the namespace in which the `mountSecret` Kubernetes secret namespace. - # - name: AGENT_MOUNT_SECURITY_MODE - # value: "Any" - # Set the path where the Rook agent can find the flex volumes - # - name: FLEXVOLUME_DIR_PATH - # value: "" - # Set the path where kernel modules can be found - # - name: LIB_MODULES_DIR_PATH - # value: "" - # Mount any extra directories into the agent container - # - name: AGENT_MOUNTS - # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" - # Rook Discover toleration. Will tolerate all taints with all keys. - # Choose between NoSchedule, PreferNoSchedule and NoExecute: - # - name: DISCOVER_TOLERATION - # value: "NoSchedule" - # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate - # - name: DISCOVER_TOLERATION_KEY - # value: "" - # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format. 
- # - name: DISCOVER_TOLERATIONS - # value: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) Rook Discover priority class name to set on the pod(s) - # - name: DISCOVER_PRIORITY_CLASS_NAME - # value: "" - # (Optional) Discover Agent NodeAffinity. - # - name: DISCOVER_AGENT_NODE_AFFINITY - # value: "role=storage-node; storage=rook, ceph" - # Allow rook to create multiple file systems. Note: This is considered - # an experimental feature in Ceph as described at - # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster - # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027 - - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS - value: "false" - - # The logging level for the operator: INFO | DEBUG - - name: ROOK_LOG_LEVEL - value: "INFO" - - # The interval to check the health of the ceph cluster and update the status in the custom resource. - - name: ROOK_CEPH_STATUS_CHECK_INTERVAL - value: "60s" - - # The interval to check if every mon is in the quorum. - - name: ROOK_MON_HEALTHCHECK_INTERVAL - value: "45s" - - # The duration to wait before trying to failover or remove/replace the - # current mon with a new mon (useful for compensating flapping network). - - name: ROOK_MON_OUT_TIMEOUT - value: "600s" - - # The duration between discovering devices in the rook-discover daemonset. - - name: ROOK_DISCOVER_DEVICES_INTERVAL - value: "60m" - - # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. - # This is necessary to workaround the anyuid issues when running on OpenShift. - # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 - - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED - value: "false" - - # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). - # Disable it here if you have similar issues. - # For more details see https://github.com/rook/rook/issues/2417 - - name: ROOK_ENABLE_SELINUX_RELABELING - value: "true" - - # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues. - # For more details see https://github.com/rook/rook/issues/2254 - - name: ROOK_ENABLE_FSGROUP - value: "true" - - # Disable automatic orchestration when new devices are discovered - - name: ROOK_DISABLE_DEVICE_HOTPLUG - value: "false" - - # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+". - # In case of more than one regex, use comma to seperate between them. - # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - # Add regex expression after putting a comma to blacklist a disk - # If value is empty, the default regex will be used. - - name: DISCOVER_DAEMON_UDEV_BLACKLIST - value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" - - # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release - # in favor of the CSI driver. - - name: ROOK_ENABLE_FLEX_DRIVER - value: "false" - - # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster. - # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs. 
- - name: ROOK_ENABLE_DISCOVERY_DAEMON - value: "true" - - # Enable the default version of the CSI CephFS driver. To start another version of the CSI driver, see image properties below. - - name: ROOK_CSI_ENABLE_CEPHFS - value: "true" - - # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below. - - name: ROOK_CSI_ENABLE_RBD - value: "true" - - name: ROOK_CSI_ENABLE_GRPC_METRICS - value: "true" - # Enable deployment of snapshotter container in ceph-csi provisioner. - - name: CSI_ENABLE_SNAPSHOTTER - value: "true" - # Set logging level for csi containers. - # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. - #- name: CSI_LOG_LEVEL - # value: "0" - # Enable Ceph Kernel clients on kernel < 4.17 which support quotas for Cephfs - # If you disable the kernel client, your application may be disrupted during upgrade. - # See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html - - name: CSI_FORCE_CEPHFS_KERNEL_CLIENT - value: "true" - # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - #- name: CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY - # value: "OnDelete" - # CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. - # Default value is RollingUpdate. - #- name: CSI_RBD_PLUGIN_UPDATE_STRATEGY - # value: "OnDelete" - # The default version of CSI supported by Rook will be started. To change the version - # of the CSI driver to something other than what is officially supported, change - # these images to the desired release of the CSI driver. - - name: ROOK_CSI_CEPH_IMAGE - value: "quay.io/cephcsi/cephcsi:v2.0.1" - #- name: ROOK_CSI_REGISTRAR_IMAGE - # value: "quay.io/k8scsi/csi-node-driver-registrar:v1.2.0" - #- name: ROOK_CSI_RESIZER_IMAGE - # value: "quay.io/k8scsi/csi-resizer:v0.4.0" - #- name: ROOK_CSI_PROVISIONER_IMAGE - # value: "quay.io/k8scsi/csi-provisioner:v1.4.0" - #- name: ROOK_CSI_SNAPSHOTTER_IMAGE - # value: "quay.io/k8scsi/csi-snapshotter:v1.2.2" - #- name: ROOK_CSI_ATTACHER_IMAGE - # value: "quay.io/k8scsi/csi-attacher:v2.1.0" - # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path. - #- name: ROOK_CSI_KUBELET_DIR_PATH - # value: "/var/lib/kubelet" - # (Optional) Ceph Provisioner NodeAffinity. - # - name: CSI_PROVISIONER_NODE_AFFINITY - # value: "role=storage-node; storage=rook, ceph" - # (Optional) Allow starting unsupported ceph-csi image - - name: ROOK_CSI_ALLOW_UNSUPPORTED_VERSION - value: "false" - # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format. - # CSI provisioner would be best to start on the same nodes as other ceph daemons. - # - name: CSI_PROVISIONER_TOLERATIONS - # value: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # (Optional) Ceph CSI plugin NodeAffinity. - # - name: CSI_PLUGIN_NODE_AFFINITY - # value: "role=storage-node; storage=rook, ceph" - # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format. - # CSI plugins need to be started on all the nodes where the clients need to mount the storage. 
- # - name: CSI_PLUGIN_TOLERATIONS - # value: | - # - effect: NoSchedule - # key: node-role.kubernetes.io/controlplane - # operator: Exists - # - effect: NoExecute - # key: node-role.kubernetes.io/etcd - # operator: Exists - # Configure CSI cephfs grpc and liveness metrics port - #- name: CSI_CEPHFS_GRPC_METRICS_PORT - # value: "9091" - #- name: CSI_CEPHFS_LIVENESS_METRICS_PORT - # value: "9081" - # Configure CSI rbd grpc and liveness metrics port - #- name: CSI_RBD_GRPC_METRICS_PORT - # value: "9090" - #- name: CSI_RBD_LIVENESS_METRICS_PORT - # value: "9080" - - # Time to wait until the node controller will move Rook pods to other - # nodes after detecting an unreachable node. - # Pods affected by this setting are: - # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox - # The value used in this variable replaces the default value of 300 secs - # added automatically by k8s as Toleration for - # - # The total amount of time to reschedule Rook pods in healthy nodes - # before detecting a condition will be the sum of: - # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag) - # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds - - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS - value: "5" - - # The name of the node to pass with the downward API - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # The pod name to pass with the downward API - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - # The pod namespace to pass with the downward API - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - # Uncomment it to run rook operator on the host network - #hostNetwork: true - volumes: - - name: rook-config - emptyDir: {} - - name: default-config-dir - emptyDir: {} -# OLM: END OPERATOR DEPLOYMENT diff --git a/examples/rook/psp.yaml b/examples/rook/psp.yaml deleted file mode 100644 index 5ca5b16c..00000000 --- a/examples/rook/psp.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - name: example ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: psp-example -spec: - allowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - privileged: false - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: psp-example-role -rules: -- apiGroups: - - extensions - resources: - - podsecuritypolicies - resourceNames: - - psp-example - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: psp-rolebinding-example -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: psp-example-role -subjects: -- apiGroup: "" - kind: ServiceAccount - name: example diff --git a/examples/rook/storageclass-cephfs.yaml b/examples/rook/storageclass-cephfs.yaml deleted file mode 100644 index 34a6a764..00000000 --- a/examples/rook/storageclass-cephfs.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-cephfs -provisioner: rook-ceph.cephfs.csi.ceph.com -parameters: - # clusterID is the namespace where operator is deployed. 
- clusterID: rook-ceph - - # CephFS filesystem name into which the volume shall be created - fsName: myfs - - # Ceph pool into which the volume shall be created - # Required for provisionVolume: "true" - pool: myfs-data0 - - # Root path of an existing CephFS volume - # Required for provisionVolume: "false" - # rootPath: /absolute/path - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. - csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - - # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) - # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse - # or by setting the default mounter explicitly via --volumemounter command-line argument. - # mounter: kernel -reclaimPolicy: Delete -allowVolumeExpansion: true -mountOptions: - # uncomment the following line for debugging - #- debug diff --git a/examples/rook/storageclass-rbd.yaml b/examples/rook/storageclass-rbd.yaml deleted file mode 100644 index 75beeb38..00000000 --- a/examples/rook/storageclass-rbd.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - failureDomain: host - replicated: - size: 3 - # Disallow setting pool with replica 1, this could lead to data loss without recovery. - # Make sure you're *ABSOLUTELY CERTAIN* that is what you want - requireSafeReplicaSize: true - # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool - # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size - #targetSizeRatio: .5 ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -provisioner: rook-ceph.rbd.csi.ceph.com -parameters: - # clusterID is the namespace where the rook cluster is running - # If you change this namespace, also change the namespace below where the secret namespaces are defined - clusterID: rook-ceph - - # If you want to use erasure coded pool with RBD, you need to create - # two pools. one erasure coded and one replicated. - # You need to specify the replicated pool here in the `pool` parameter, it is - # used for the metadata of the images. - # The erasure coded pool must be set as the `dataPool` parameter below. - #dataPool: ec-data-pool - pool: replicapool - - # RBD image format. Defaults to "2". - imageFormat: "2" - - # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. - imageFeatures: layering - - # The secrets contain Ceph admin credentials. These are generated automatically by the operator - # in the same namespace as the cluster. 
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - # Specify the filesystem type of the volume. If not specified, csi-provisioner - # will set default as `ext4`. - csi.storage.k8s.io/fstype: ext4 -# uncomment the following to use rbd-nbd as mounter on supported nodes -# **IMPORTANT**: If you are using rbd-nbd as the mounter, during upgrade you will be hit a ceph-csi -# issue that causes the mount to be disconnected. You will need to follow special upgrade steps -# to restart your application pods. Therefore, this option is not recommended. -#mounter: rbd-nbd -allowVolumeExpansion: true -reclaimPolicy: Delete diff --git a/examples/rook/wordpress.yaml b/examples/rook/wordpress.yaml deleted file mode 100644 index ac3de07e..00000000 --- a/examples/rook/wordpress.yaml +++ /dev/null @@ -1,73 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: wordpress - labels: - app: wordpress -spec: - ports: - - port: 80 - selector: - app: wordpress - tier: frontend - type: LoadBalancer ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: wp-pv-claim - labels: - app: wordpress -spec: - storageClassName: rook-cephfs - accessModes: - - ReadWriteMany - resources: - requests: - storage: 20Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: wordpress - labels: - app: wordpress - tier: frontend -spec: - selector: - matchLabels: - app: wordpress - tier: frontend - #strategy: - # type: Recreate - replicas: 3 - template: - metadata: - labels: - app: wordpress - tier: frontend - spec: - serviceAccountName: example - serviceAccount: example - containers: - - image: wordpress:4.6.1-apache - name: wordpress - env: - - name: WORDPRESS_DB_HOST - value: wordpress-mysql - - name: WORDPRESS_DB_PASSWORD - value: changeme - ports: - - containerPort: 80 - name: wordpress - volumeMounts: - - name: wordpress-persistent-storage - mountPath: /var/www/html - resources: - limits: - memory: "8Gi" - cpu: "500m" - volumes: - - name: wordpress-persistent-storage - persistentVolumeClaim: - claimName: wp-pv-claim diff --git a/helm/csi-driver-lvm/Chart.yaml b/helm/csi-driver-lvm/Chart.yaml index a7678ace..46425c48 100644 --- a/helm/csi-driver-lvm/Chart.yaml +++ b/helm/csi-driver-lvm/Chart.yaml @@ -1,7 +1,7 @@ name: csi-driver-lvm -version: 0.3.7 +version: 0.4.0 -description: local persistend storage for lvm +description: local persistent storage for lvm -appVersion: v0.3.7 +appVersion: v0.4.0 apiVersion: v1 keywords: - storage diff --git a/helm/csi-driver-lvm/values.yaml b/helm/csi-driver-lvm/values.yaml index 158be5f7..54196c37 100644 --- a/helm/csi-driver-lvm/values.yaml +++ b/helm/csi-driver-lvm/values.yaml @@ -10,12 +10,12 @@ lvm: pluginImage: repository: metalstack/lvmplugin - tag: v0.3.7 + tag: v0.4.0 pullPolicy: IfNotPresent provisionerImage: repository: metalstack/csi-lvmplugin-provisioner - tag: v0.3.7 + tag: v0.4.0 pullPolicy: IfNotPresent rbac: diff --git a/tests/bats/test.bats b/tests/bats/test.bats index 64ef5e6c..ddd12b88 100644 --- a/tests/bats/test.bats +++ b/tests/bats/test.bats @@ -26,6 +26,12 @@ [ "$output" = "volume-test-inline,Running" ] } +@test "delete inline linear pod" { + run kubectl delete -f /files/inline.yaml + [ "$status" -eq 0 ] + [ "${lines[0]}" = "pod \"volume-test-inline\"
deleted" ] +} + @test "create pvc" { run kubectl apply -f /files/pvc.yaml [ "$status" -eq 0 ] @@ -110,6 +116,7 @@ [ "${lines[1]}" = "persistentvolumeclaim \"lvm-pvc-linear\" deleted" ] } @test "clean up " { + run sleep 60 run helm uninstall ${DOCKER_TAG} -n ${DOCKER_TAG} run sleep 30 run kubectl delete ns ${DOCKER_TAG} diff --git a/tests/files/helm/csi-driver-lvm/.helmignore b/tests/files/helm/csi-driver-lvm/.helmignore deleted file mode 100644 index f0c13194..00000000 --- a/tests/files/helm/csi-driver-lvm/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/tests/files/helm/csi-driver-lvm/Chart.yaml b/tests/files/helm/csi-driver-lvm/Chart.yaml deleted file mode 100644 index a7678ace..00000000 --- a/tests/files/helm/csi-driver-lvm/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: csi-driver-lvm -version: 0.3.7 -description: local persistend storage for lvm -appVersion: v0.3.7 -apiVersion: v1 -keywords: -- storage -- block-storage -- volume -home: https://metal-stack.io -sources: -- https://github.com/metal-stack diff --git a/tests/files/helm/csi-driver-lvm/README.md b/tests/files/helm/csi-driver-lvm/README.md deleted file mode 100644 index 0dfa4396..00000000 --- a/tests/files/helm/csi-driver-lvm/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# CSI Driver LVM Helm Chart - -## TL;DR - -``` -$ helm install my-csi-driver-lvm --namespace default csi-driver-lvm --set lvm.devicePattern='/dev/nvme[0-9]n[0-9]' -``` diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-attacher.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-lvm-attacher.yaml deleted file mode 100644 index 4fddb26d..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-attacher.yaml +++ /dev/null @@ -1,65 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: csi-lvm-attacher - labels: - app: csi-lvm-attacher - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - selector: - app: csi-lvm-attacher - ports: - - name: dummy - port: 12345 - ---- -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: csi-lvm-attacher - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - serviceName: "csi-lvm-attacher" - replicas: 1 - selector: - matchLabels: - app: csi-lvm-attacher - template: - metadata: - labels: - app: csi-lvm-attacher - spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - csi-lvmplugin - topologyKey: kubernetes.io/hostname - serviceAccountName: csi-attacher - containers: - - name: csi-attacher - image: quay.io/k8scsi/csi-attacher:v2.2.0 - args: - - --v=5 - - --csi-address=/csi/csi.sock - securityContext: - # This is necessary only for systems with SELinux, where - # non-privileged sidecar containers cannot access unix domain socket - # created by privileged CSI driver container. 
- privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - - volumes: - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/plugins/{{ .Values.lvm.storageClassStub }} - type: DirectoryOrCreate - name: socket-dir diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-driverinfo.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-lvm-driverinfo.yaml deleted file mode 100644 index 5ab3cd09..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-driverinfo.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: {{ .Values.lvm.driverName }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - # Supports persistent and ephemeral inline volumes. - volumeLifecycleModes: - - Persistent - - Ephemeral - # To determine at runtime which mode a volume uses, pod info and its - # "csi.storage.k8s.io/ephemeral" entry are needed. - podInfoOnMount: true diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-plugin-deployment.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-lvm-plugin-deployment.yaml deleted file mode 100644 index 55ca00b4..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-plugin-deployment.yaml +++ /dev/null @@ -1,181 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: csi-lvmplugin - labels: - app: csi-lvmplugin - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - selector: - app: csi-lvmplugin - ports: - - name: dummy - port: 12345 ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: csi-lvmplugin -spec: - revisionHistoryLimit: 10 - selector: - matchLabels: - app: csi-lvmplugin - template: - metadata: - creationTimestamp: null - labels: - app: csi-lvmplugin - spec: - serviceAccountName: csi-lvmplugin - containers: - - args: - - --v=5 - - --csi-address=/csi/csi.sock - - --kubelet-registration-path={{ .Values.kubernetes.kubeletPath }}/plugins/{{ .Values.lvm.storageClassStub }}/csi.sock - env: - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 - imagePullPolicy: IfNotPresent - name: node-driver-registrar - resources: {} - securityContext: - privileged: true - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: /registration - name: registration-dir - - args: - - --drivername={{ .Values.lvm.driverName }} - - --endpoint=$(CSI_ENDPOINT) - - --devices=$(CSI_DEVICESPATTERN) - - --nodeid=$(KUBE_NODE_NAME) - - --vgname=$(CSI_VGNAME) - - --namespace=$(CSI_NAMESPACE) - - --provisionerimage=$(CSI_PROVISIONER_IMAGE) - - --pullpolicy=$(CSI_PULL_POLICY) - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CSI_DEVICESPATTERN - value: {{ .Values.lvm.devicePattern }} - - name: CSI_VGNAME - value: {{ .Values.lvm.vgName }} - - name: CSI_NAMESPACE - value: {{ .Release.Namespace }} - - name: CSI_PROVISIONER_IMAGE - value: "{{ .Values.provisionerImage.repository }}:{{ .Values.provisionerImage.tag }}" - - name: CSI_PULL_POLICY - value: {{ .Values.provisionerImage.pullPolicy }} - image: "{{ .Values.pluginImage.repository }}:{{ .Values.pluginImage.tag }}" - imagePullPolicy: {{ .Values.pluginImage.pullPolicy }} - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: healthz - scheme: HTTP - initialDelaySeconds: 
10 - periodSeconds: 2 - successThreshold: 1 - timeoutSeconds: 3 - name: lvm - ports: - - containerPort: 9898 - name: healthz - protocol: TCP - resources: {} - securityContext: - privileged: true - terminationMessagePath: /termination.log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - - mountPath: {{ .Values.kubernetes.kubeletPath }}/pods - mountPropagation: Bidirectional - name: mountpoint-dir - - mountPath: {{ .Values.kubernetes.kubeletPath }}/plugins - mountPropagation: Bidirectional - name: plugins-dir - - mountPath: /dev - name: dev-dir - mountPropagation: Bidirectional - - mountPath: /lib/modules - name: mod-dir - - mountPath: /etc/lvm/backup - name: lvmbackup - mountPropagation: Bidirectional - - mountPath: /etc/lvm/cache - name: lvmcache - mountPropagation: Bidirectional - - mountPath: /run/lock/lvm - name: lvmlock - mountPropagation: Bidirectional - - args: - - --csi-address=/csi/csi.sock - - --health-port=9898 - image: quay.io/k8scsi/livenessprobe:v1.1.0 - imagePullPolicy: IfNotPresent - name: liveness-probe - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /csi - name: socket-dir - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 - volumes: - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/plugins/{{ .Values.lvm.storageClassStub }} - type: DirectoryOrCreate - name: socket-dir - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/pods - type: DirectoryOrCreate - name: mountpoint-dir - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/plugins_registry - type: Directory - name: registration-dir - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/plugins - type: Directory - name: plugins-dir - - hostPath: - path: /dev - type: Directory - name: dev-dir - - hostPath: - path: /lib/modules - type: Directory - name: mod-dir - - hostPath: - path: /etc/lvm/backup - type: DirectoryOrCreate - name: lvmbackup - - hostPath: - path: /etc/lvm/cache - type: DirectoryOrCreate - name: lvmcache - - hostPath: - path: /run/lock/lvm - type: DirectoryOrCreate - name: lvmlock diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-plugin-rbac.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-lvm-plugin-rbac.yaml deleted file mode 100644 index 7e07dc01..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-plugin-rbac.yaml +++ /dev/null @@ -1,106 +0,0 @@ -{{- if .Values.rbac.create }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-lvmplugin - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-lvmplugin-runner-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-lvmplugin-role-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - 
kind: ServiceAccount - name: csi-lvmplugin - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: external-lvmplugin-runner-{{ .Release.Name }} - apiGroup: rbac.authorization.k8s.io - ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-lvmplugin-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-lvmplugin-role-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-lvmplugin - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: external-lvmplugin-cfg - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-lvmplugin - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["get", "watch", "list", "create", "delete"] - ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-lvmplugin-role - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-lvmplugin - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: csi-lvmplugin - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-provisioner.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-lvm-provisioner.yaml deleted file mode 100644 index 09a80c98..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-provisioner.yaml +++ /dev/null @@ -1,65 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: csi-lvm-provisioner - labels: - app: csi-lvm-provisioner - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - selector: - app: csi-lvm-provisioner - ports: - - name: dummy - port: 12345 - ---- -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: csi-lvm-provisioner - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - serviceName: "csi-lvm-provisioner" - replicas: 1 - selector: - matchLabels: - app: csi-lvm-provisioner - template: - metadata: - labels: - app: csi-lvm-provisioner - spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - csi-lvmplugin - topologyKey: kubernetes.io/hostname - serviceAccountName: csi-provisioner - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.6.0 - args: - - -v=5 - - --csi-address=/csi/csi.sock - - --feature-gates=Topology=true - securityContext: - # This is necessary only for systems with SELinux, where - # non-privileged sidecar containers cannot access unix domain socket - # created by privileged CSI driver container. 
- privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/plugins/{{ .Values.lvm.storageClassStub }} - type: DirectoryOrCreate - name: socket-dir diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-resizer.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-lvm-resizer.yaml deleted file mode 100644 index 3b10ffe9..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-lvm-resizer.yaml +++ /dev/null @@ -1,64 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: csi-lvm-resizer - labels: - app: csi-lvm-resizer - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - selector: - app: csi-lvm-resizer - ports: - - name: dummy - port: 12345 - ---- -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: csi-lvm-resizer - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - serviceName: "csi-lvm-resizer" - replicas: 1 - selector: - matchLabels: - app: csi-lvm-resizer - template: - metadata: - labels: - app: csi-lvm-resizer - spec: - affinity: - podAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - csi-lvmplugin - topologyKey: kubernetes.io/hostname - serviceAccountName: csi-resizer - containers: - - name: csi-resizer - image: quay.io/k8scsi/csi-resizer:v0.5.0 - args: - - -v=5 - - -csi-address=/csi/csi.sock - securityContext: - # This is necessary only for systems with SELinux, where - # non-privileged sidecar containers cannot access unix domain socket - # created by privileged CSI driver container. - privileged: true - volumeMounts: - - mountPath: /csi - name: socket-dir - volumes: - - hostPath: - path: {{ .Values.kubernetes.kubeletPath }}/plugins/{{ .Values.lvm.storageClassStub }} - type: DirectoryOrCreate - name: socket-dir diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-linear.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-linear.yaml deleted file mode 100644 index 6fe20935..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-linear.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ .Values.lvm.storageClassStub }}-sc-linear - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -provisioner: {{ .Values.lvm.driverName }} -reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -parameters: - type: "linear" diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-mirror.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-mirror.yaml deleted file mode 100644 index b613091e..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-mirror.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ .Values.lvm.storageClassStub }}-sc-mirror - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -provisioner: {{ .Values.lvm.driverName }} -reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -parameters: - type: mirror diff --git a/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-striped.yaml b/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-striped.yaml deleted file mode 100644 index 4f0fb94d..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/csi-storageclass-striped.yaml +++ /dev/null @@ -1,13 +0,0 @@ 
-apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ .Values.lvm.storageClassStub }}-sc-striped - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -provisioner: {{ .Values.lvm.driverName }} -reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true -parameters: - type: "striped" diff --git a/tests/files/helm/csi-driver-lvm/templates/external-attacher-rbac.yaml b/tests/files/helm/csi-driver-lvm/templates/external-attacher-rbac.yaml deleted file mode 100644 index ff7b821a..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/external-attacher-rbac.yaml +++ /dev/null @@ -1,75 +0,0 @@ -{{- if .Values.rbac.create }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-attacher - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-attacher-runner-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update", "patch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-attacher-role-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-attacher - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: external-attacher-runner-{{ .Release.Name }} - apiGroup: rbac.authorization.k8s.io - ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-attacher-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-attacher-role-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-attacher - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: external-attacher-cfg - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/tests/files/helm/csi-driver-lvm/templates/external-provisioner-rbac.yaml b/tests/files/helm/csi-driver-lvm/templates/external-provisioner-rbac.yaml deleted file mode 100644 index 7fec5b9a..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/external-provisioner-rbac.yaml +++ /dev/null @@ -1,95 +0,0 @@ -{{- if .Values.rbac.create }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-provisioner - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-provisioner-runner-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - 
apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["get", "list"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-provisioner-role-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-provisioner - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: external-provisioner-runner-{{ .Release.Name }} - apiGroup: rbac.authorization.k8s.io - ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-provisioner-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-provisioner-role-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-provisioner - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io - -{{- end }} diff --git a/tests/files/helm/csi-driver-lvm/templates/external-resizer-rbac.yaml b/tests/files/helm/csi-driver-lvm/templates/external-resizer-rbac.yaml deleted file mode 100644 index 71809e50..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/external-resizer-rbac.yaml +++ /dev/null @@ -1,79 +0,0 @@ -{{- if .Values.rbac.create }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: csi-resizer - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} - ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-resizer-runner-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-resizer-role-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-resizer - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: external-resizer-runner-{{ .Release.Name }} - apiGroup: rbac.authorization.k8s.io - ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: external-resizer-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", 
"create"] - ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-resizer-role-cfg - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -subjects: - - kind: ServiceAccount - name: csi-resizer - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: external-resizer-cfg - apiGroup: rbac.authorization.k8s.io - {{- end }} diff --git a/tests/files/helm/csi-driver-lvm/templates/psp.yaml b/tests/files/helm/csi-driver-lvm/templates/psp.yaml deleted file mode 100644 index cd1aa7c9..00000000 --- a/tests/files/helm/csi-driver-lvm/templates/psp.yaml +++ /dev/null @@ -1,87 +0,0 @@ -{{- if .Values.rbac.pspEnabled }} - -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: psp-lvm-{{ .Release.Name }} - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -spec: - allowPrivilegeEscalation: true - fsGroup: - rule: RunAsAny - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: psp-lvm-role - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -rules: -- apiGroups: - - extensions - resources: - - podsecuritypolicies - resourceNames: - - psp-lvm-{{ .Release.Name }} - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: psp-rolebinding-lvm - labels: - heritage: {{ .Release.Service }} - release: {{ .Release.Name }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: psp-lvm-role -subjects: -- apiGroup: "" - kind: ServiceAccount - name: csi-lvm-attacher -- apiGroup: "" - kind: ServiceAccount - name: csi-lvm -- apiGroup: "" - kind: ServiceAccount - name: csi-lvm-provisioner -- apiGroup: "" - kind: ServiceAccount - name: csi-lvm-resizer -- apiGroup: "" - kind: ServiceAccount - name: csi-lvm-snapshotter -- apiGroup: "" - kind: ServiceAccount - name: csi-lvm-socat -- apiGroup: "" - kind: ServiceAccount - name: csi-attacher -- apiGroup: "" - kind: ServiceAccount - name: csi-provisioner -- apiGroup: "" - kind: ServiceAccount - name: csi-resizer -- apiGroup: "" - kind: ServiceAccount - name: csi-snapshotter -- apiGroup: "" - kind: ServiceAccount - name: csi-lvmplugin - -{{- end }} diff --git a/tests/files/helm/csi-driver-lvm/values.yaml b/tests/files/helm/csi-driver-lvm/values.yaml deleted file mode 100644 index 158be5f7..00000000 --- a/tests/files/helm/csi-driver-lvm/values.yaml +++ /dev/null @@ -1,26 +0,0 @@ - -lvm: - # This one you should change - devicePattern: /dev/nvme[0-9]n[0-9] - - # these are primariliy for testing purposes - vgName: csi-lvm - driverName: lvm.csi.metal-stack.io - storageClassStub: csi-lvm - -pluginImage: - repository: metalstack/lvmplugin - tag: v0.3.7 - pullPolicy: IfNotPresent - -provisionerImage: - repository: metalstack/csi-lvmplugin-provisioner - tag: v0.3.7 - pullPolicy: IfNotPresent - -rbac: - create: true - pspEnabled: true - -kubernetes: - kubeletPath: /var/lib/kubelet