From b226de6de5f2d6a66ec4cd25a4f6075cc8acb18b Mon Sep 17 00:00:00 2001 From: Vicente Cheng Date: Tue, 23 Jul 2024 15:11:28 +0800 Subject: [PATCH] common: update README and example CRDs Signed-off-by: Vicente Cheng --- README.md | 97 ++++++++++++----------------- examples/README.md | 7 +++ examples/generic-vol.yaml | 32 ++++++++++ examples/pod.yaml | 26 ++++++++ examples/pvc-clone.yaml | 16 +++++ examples/pvc.yaml | 12 ++++ examples/storageclass-dm-thin.yaml | 11 ++++ examples/storageclass-striped.yaml | 11 ++++ examples/volume-snapshot-class.yaml | 6 ++ examples/volumesnapshot.yaml | 9 +++ 10 files changed, 170 insertions(+), 57 deletions(-) create mode 100644 examples/README.md create mode 100644 examples/generic-vol.yaml create mode 100644 examples/pod.yaml create mode 100644 examples/pvc-clone.yaml create mode 100644 examples/pvc.yaml create mode 100644 examples/storageclass-dm-thin.yaml create mode 100644 examples/storageclass-striped.yaml create mode 100644 examples/volume-snapshot-class.yaml create mode 100644 examples/volumesnapshot.yaml diff --git a/README.md b/README.md index 0c11ff4b..2a42a904 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,65 @@ -# csi-driver-lvm # +# Harvester-csi-driver-lvm -CSI DRIVER LVM utilizes local storage of Kubernetes nodes to provide persistent storage for pods. +Harvester-CSI-Driver-LVM is derived from [metal-stack/csi-driver-lvm](https://github.com/metal-stack/csi-driver-lvm). -It automatically creates hostPath based persistent volumes on the nodes. +## Introduction -Underneath it creates a LVM logical volume on the local disks. A comma-separated list of grok pattern, which disks to use must be specified. +Harvester-CSI-Driver-LVM utilizes local storage to provide persistent storage for workloads (Usually VM workloads). It will make the VM unable to be migrated to other nodes, but it can provide better performance. 
-This CSI driver is derived from [csi-driver-host-path](https://github.com/kubernetes-csi/csi-driver-host-path) and [csi-lvm](https://github.com/metal-stack/csi-lvm) +Before you use it, you should have the pre-established Volume Group (VG) on that node. The VG name will be specified in the StorageClass. -## Currently it can create, delete, mount, unmount and resize block and filesystem volumes via lvm ## +The Harvester-CSI-Driver-LVM provides the following features: +- OnDemand Creation of Logical Volume (LV). +- Support LVM type Striped and DM-Thin. +- Support for Raw Block Volume. +- Support Volume Expansion. +- Support Volume Snapshot. +- Support Volume Clone. -For the special case of block volumes, the filesystem-expansion has to be performed by the app using the block device +**NOTE**: The Snapshot/Clone feature only works on the same nodes. Clone works for different Volume Groups. ## Installation ## -**Helm charts for installation are located in a separate repository called [helm-charts](https://github.com/metal-stack/helm-charts). If you would like to contribute to the helm chart, please raise an issue or pull request there.** +You can use Helm to install the Harvester-CSI-Driver-LVM by remote repo or local helm chart files. -You have to set the devicePattern for your hardware to specify which disks should be used to create the volume group. +1. Install the Harvester-CSI-Driver-LVM locally: -```bash -helm install --repo https://helm.metal-stack.io mytest helm/csi-driver-lvm --set lvm.devicePattern='/dev/nvme[0-9]n[0-9]' +``` +$ git clone https://github.com/harvester/csi-driver-lvm.git +$ cd csi-driver-lvm/deploy +$ helm install harvester-lvm-csi-driver charts/ -n harvester-system ``` -Now you can use one of following storageClasses: +2. 
Install the Harvester-CSI-Driver-LVM by remote repo:
-
-* `csi-driver-lvm-linear`
-* `csi-driver-lvm-mirror`
-* `csi-driver-lvm-striped`
+```
+$ helm repo add harvester https://charts.harvesterhci.io
+$ helm install harvester-lvm-csi-driver harvester/harvester-lvm-csi-driver -n harvester-system
+```
-To get the previous old and now deprecated `csi-lvm-sc-linear`, ... storageclasses, set helm-chart value `compat03x=true`.
+After the installation, you can check the status of the following pods:
+```
+$ kubectl get pods -A |grep harvester-csi-driver-lvm
+harvester-system harvester-csi-driver-lvm-controller-0 4/4 Running 0 3h2m
+harvester-system harvester-csi-driver-lvm-plugin-ctlgp 3/3 Running 1 (14h ago) 14h
+harvester-system harvester-csi-driver-lvm-plugin-qxxqs 3/3 Running 1 (14h ago) 14h
+harvester-system harvester-csi-driver-lvm-plugin-xktx2 3/3 Running 0 14h
+```
-## Migration ##
+The CSI driver will be installed in the `harvester-system` namespace and provisioned to each node.
-If you want to migrate your existing PVC to / from csi-driver-lvm, you can use [korb](https://github.com/BeryJu/korb).
+After installation, you can refer to the `examples` directory for some example CRDs for usage.
### Todo ###
-* implement CreateSnapshot(), ListSnapshots(), DeleteSnapshot()
-
-
-### Test ###
-
-```bash
-kubectl apply -f examples/csi-pvc-raw.yaml
-kubectl apply -f examples/csi-pod-raw.yaml
-
-
-kubectl apply -f examples/csi-pvc.yaml
-kubectl apply -f examples/csi-app.yaml
+* Implement the unit tests
+* Implement the webhook for the validation
-kubectl delete -f examples/csi-pod-raw.yaml
-kubectl delete -f examples/csi-pvc-raw.yaml
+### HowTo Build
-kubectl delete -f examples/csi-app.yaml
-kubectl delete -f examples/csi-pvc.yaml
```
-
-### Development ###
-
-In order to run the integration tests locally, you need to create to loop devices on your host machine. Make sure the loop device mount paths are not used on your system (default path is `/dev/loop10{0,1}`). 
-
-You can create these loop devices like this:
-
-```bash
-for i in 100 101; do fallocate -l 1G loop${i}.img ; sudo losetup /dev/loop${i} loop${i}.img; done
-sudo losetup -a
-# use this for recreation or cleanup
-# for i in 100 101; do sudo losetup -d /dev/loop${i}; rm -f loop${i}.img; done
+$ make
```
-You can then run the tests against a kind cluster, running:
-
-```bash
-make test
-```
-
-To recreate or cleanup the kind cluster:
-
-```bash
-make test-cleanup
-```
+The above command will execute the validation and build the target Image.
+You can define your REPO and TAG with ENV `REPO` and `TAG`.
\ No newline at end of file
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 00000000..5741b236
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,7 @@
+
+There are some examples for using the `harvester-csi-driver-lvm`.
+
+Before you start, you need to install the `harvester-csi-driver-lvm` first. For more information, refer to the [installation guide](../README.md#installation).
+Also, the examples contain some hardcoded values. You need to modify them to fit your environment.
+- vgName: You need to ensure the vgName was created in your environment.
+- nodeAffinity: You need to replace the value of the corresponding node name in your environment. 
\ No newline at end of file diff --git a/examples/generic-vol.yaml b/examples/generic-vol.yaml new file mode 100644 index 00000000..0fbed212 --- /dev/null +++ b/examples/generic-vol.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx-01 +spec: + containers: + - name: test-container + image: nginx + volumeDevices: + - name: ephemeral-volume + devicePath: /vol001 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.lvm.csi/node + operator: In + values: + - harvester-node-2 + volumes: + - name: ephemeral-volume + ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: lvm-striped + volumeMode: Block \ No newline at end of file diff --git a/examples/pod.yaml b/examples/pod.yaml new file mode 100644 index 00000000..7d6de90a --- /dev/null +++ b/examples/pod.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: ubuntu-jammy-pod +spec: + containers: + - name: ubuntu-jammy-container + image: ubuntu:jammy + command: ["/bin/bash", "-c", "--"] + args: ["while true; do sleep 30; done;"] + volumeDevices: + - devicePath: "/volumes/vol001" + name: vol001 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.lvm.csi/node + operator: In + values: + - harvester-node-2 + volumes: + - name: vol001 + persistentVolumeClaim: + claimName: vol001 \ No newline at end of file diff --git a/examples/pvc-clone.yaml b/examples/pvc-clone.yaml new file mode 100644 index 00000000..6c1ec8c9 --- /dev/null +++ b/examples/pvc-clone.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vol001-clone +spec: + storageClassName: lvm-striped + dataSource: + name: vol001 + kind: PersistentVolumeClaim + apiGroup: "" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + 
volumeMode: Block \ No newline at end of file diff --git a/examples/pvc.yaml b/examples/pvc.yaml new file mode 100644 index 00000000..6aac06c2 --- /dev/null +++ b/examples/pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vol001 +spec: + storageClassName: lvm-striped + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + volumeMode: Block \ No newline at end of file diff --git a/examples/storageclass-dm-thin.yaml b/examples/storageclass-dm-thin.yaml new file mode 100644 index 00000000..c097a539 --- /dev/null +++ b/examples/storageclass-dm-thin.yaml @@ -0,0 +1,11 @@ +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: lvm-dm-thin +parameters: + type: dm-thin + vgName: vg01 +provisioner: lvm.driver.harvesterhci.io +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/examples/storageclass-striped.yaml b/examples/storageclass-striped.yaml new file mode 100644 index 00000000..71aa48a2 --- /dev/null +++ b/examples/storageclass-striped.yaml @@ -0,0 +1,11 @@ +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: lvm-striped +parameters: + type: striped + vgName: vg01 +provisioner: lvm.driver.harvesterhci.io +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer \ No newline at end of file diff --git a/examples/volume-snapshot-class.yaml b/examples/volume-snapshot-class.yaml new file mode 100644 index 00000000..9babc0fa --- /dev/null +++ b/examples/volume-snapshot-class.yaml @@ -0,0 +1,6 @@ +apiVersion: snapshot.storage.k8s.io/v1 +deletionPolicy: Delete +driver: lvm.driver.harvesterhci.io +kind: VolumeSnapshotClass +metadata: + name: lvm-snapshot \ No newline at end of file diff --git a/examples/volumesnapshot.yaml b/examples/volumesnapshot.yaml new file mode 100644 index 00000000..6bcbf062 --- /dev/null +++ b/examples/volumesnapshot.yaml @@ -0,0 +1,9 @@ +apiVersion: 
snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: vol001-snapshot + namespace: default +spec: + volumeSnapshotClassName: lvm-snapshot + source: + persistentVolumeClaimName: vol001 \ No newline at end of file