From 84661b58c9a6e50bbe59beaee90bfbfd033b1e27 Mon Sep 17 00:00:00 2001
From: Gerhard Lazu
Date: Fri, 28 Apr 2023 07:23:07 +0100
Subject: [PATCH] Add support for Talos OS (#87)

* Add Talos OS support

`/etc` on Talos OS is read-only, which means that new PVs will fail to
create. This change makes the hard-coded `/etc/lvm` path configurable
via the `--hostwritepath` flag.

NOTE that this also changes the current `/run/lock/lvm` path to
`/etc/lvm/lock`.

This is a requirement for
https://github.com/metal-stack/helm-charts/pull/64

Signed-off-by: Gerhard Lazu

* Create loop devices as part of `make test`

After this change, people who expect integration tests to be
self-contained will not be disappointed. It took me a while to figure
out why **some** integration tests were failing locally. I eventually
found out about this requirement on this doc page:
https://docs.metal-stack.io/stable/external/csi-driver-lvm/README/.
The GitHub Actions workflow also helped. Even then, the mknod command
was not mentioned anywhere, and my NixOS host did not have the special
files /dev/loop100 & /dev/loop101 created (see the shell sketch below).
With this change, `make test` is self-contained & works the same on all
Linux hosts, whether that is a local development workstation or a
GitHub Actions runner.

Speaking of GitHub Actions, we do not want to run the build-platforms
job if the DOCKER_REGISTRY_TOKEN secret is not set. If we don't check
for this, the job will fail in repo forks, where these secrets are not
available by default. FWIW, the `secrets` context is not available in
`if` conditions; the secret value needs to be exposed as an env var for
the `if` condition to work correctly. FTR:
https://github.com/orgs/community/discussions/26726

I also remembered to remove the loop devices as part of `make
test-cleanup` & to double-check that the loop devices have actually
been removed. I have hit a situation where the backing file was
deleted, but /dev/loop100 was still left dangling; I had to
`sudo dmsetup remove` it.

Lastly, the Docker CLI is configured to ignore *.img files. These are
created in the same directory and should not be sent to Docker when
running `docker build`.

Signed-off-by: Gerhard Lazu

* Refactor tests

Remove all hard-coded sleeps **except** the last one, which runs before
we delete the csi-lvm-controller; otherwise PVCs may not get deleted
before the controller is gone, and the loop devices will then not be
cleared correctly when running `make test-cleanup`.

We also want to test one thing per test, otherwise we may not know why
a test failed. We leverage `kubectl wait --for=jsonpath=` as much as
possible (see the example below); this way the tests do not need to
check for specific output strings, we let `--for=jsonpath=` do that.
The best part of this approach is that we can use the `--timeout` flag.
This brings the **entire** integration test suite down to 70 seconds.
Before this change, the sleeps alone (170s) took longer than that.

To double-check for race conditions or flaky tests, I ran the whole
suite locally 100 times with `RERUN=100 make test`. All 100 runs
passed. This looks good to me!

Separately, I have also tested this on Talos v1.4.0 running K8s 1.26.4.
Everything works as expected now.
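For the record, the new `/dev/loop%` and `rm-loop%` Makefile targets
boil down to roughly the following shell steps for device 100 (a sketch
for reproducing the setup by hand, not a verbatim excerpt from the
Makefile):

    # create a 1G backing file for the loop device
    fallocate --length 1G loop100.img
    # create the device node if the host does not already have it
    # (block device, major 7 = loop); the ifndef GITHUB_ACTIONS guard
    # skips this step on GitHub Actions runners, which already ship it
    sudo mknod /dev/loop100 b 7 100
    # attach the backing file, then print the device to verify
    sudo losetup /dev/loop100 loop100.img
    sudo losetup /dev/loop100

    # teardown (make test-cleanup): detach, then remove node & file
    sudo losetup -d /dev/loop100
    sudo rm -f /dev/loop100
    rm loop100.img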
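The wait-based assertion pattern used throughout the refactored suite
looks like this (shown here against the linear PVC manifest; any
manifest whose resource exposes a status phase works the same way):

    # block until the PVC reports phase=Bound, or fail after 10s
    kubectl wait --for=jsonpath='{.status.phase}'=Bound \
        -f files/pvc.linear.yaml --timeout=10s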
See this PR comment for more details:
https://github.com/metal-stack/csi-driver-lvm/pull/87#issuecomment-1519667687

Signed-off-by: Gerhard Lazu

---------

Signed-off-by: Gerhard Lazu
---
 .dockerignore                                 |   1 +
 .github/workflows/docker.yaml                 |  11 +-
 Makefile                                      |  36 +++++-
 cmd/lvmplugin/main.go                         |   3 +-
 pkg/lvm/controllerserver.go                   |   6 +-
 pkg/lvm/lvm.go                                |  13 +-
 tests/bats/test.bats                          | 115 ++++++++----------
 .../files/{block.yaml => pod.block.vol.yaml}  |   0
 .../{xfs.yaml => pod.inline.vol.xfs.yaml}     |   0
 .../{inline.yaml => pod.inline.vol.yaml}      |   0
 .../{linear.yaml => pod.linear.vol.yaml}      |   0
 ...{pvc-resize.yaml => pvc.block.resize.yaml} |  12 --
 tests/files/{pvc.yaml => pvc.block.yaml}      |  12 --
 tests/files/pvc.linear.resize.yaml            |  11 ++
 tests/files/pvc.linear.yaml                   |  11 ++
 15 files changed, 131 insertions(+), 100 deletions(-)
 create mode 100644 .dockerignore
 rename tests/files/{block.yaml => pod.block.vol.yaml} (100%)
 rename tests/files/{xfs.yaml => pod.inline.vol.xfs.yaml} (100%)
 rename tests/files/{inline.yaml => pod.inline.vol.yaml} (100%)
 rename tests/files/{linear.yaml => pod.linear.vol.yaml} (100%)
 rename tests/files/{pvc-resize.yaml => pvc.block.resize.yaml} (51%)
 rename tests/files/{pvc.yaml => pvc.block.yaml} (51%)
 create mode 100644 tests/files/pvc.linear.resize.yaml
 create mode 100644 tests/files/pvc.linear.yaml

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..a89285e5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+*.img
diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index c01d9b06..ae8e096f 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -55,8 +55,6 @@ jobs:
 
       - name: Test
         run: |
-          for i in 100 101; do fallocate -l 1G loop${i}.img ; sudo losetup /dev/loop${i} loop${i}.img; done
-          sudo losetup -a
           make test
 
   build-platforms:
@@ -65,9 +63,12 @@ jobs:
     needs:
       - lint
      - test
+    env:
+      DOCKER_REGISTRY_TOKEN: ${{ secrets.DOCKER_REGISTRY_TOKEN }}
 
     steps:
       - name: Log in to the container registry
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         uses: docker/login-action@v2
         with:
           registry: ${{ env.REGISTRY }}
@@ -75,23 +76,28 @@ jobs:
           password: ${{ secrets.DOCKER_REGISTRY_TOKEN }}
 
       - name: Checkout
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         uses: actions/checkout@v3
 
       - name: Set up Go 1.19
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         uses: actions/setup-go@v3
         with:
           go-version: 1.19
 
       - name: Set up Docker Buildx
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         uses: docker/setup-buildx-action@v2
 
       - name: Make tag
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         run: |
           [ "${GITHUB_EVENT_NAME}" == 'pull_request' ] && echo "tag=${GITHUB_HEAD_REF##*/}" >> $GITHUB_ENV || true
           [ "${GITHUB_EVENT_NAME}" == 'release' ] && echo "tag=${GITHUB_REF##*/}" >> $GITHUB_ENV || true
           [ "${GITHUB_EVENT_NAME}" == 'push' ] && echo "tag=latest" >> $GITHUB_ENV || true
 
       - name: Build and push image
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         uses: docker/build-push-action@v3
         with:
           context: .
@@ -100,6 +106,7 @@ jobs:
           platforms: linux/amd64,linux/arm64,linux/arm/v7
 
       - name: Build and push provisioner image
+        if: ${{ env.DOCKER_REGISTRY_TOKEN != '' }}
         uses: docker/build-push-action@v3
         with:
           context: .
diff --git a/Makefile b/Makefile
index f738df09..bb0f13e8 100644
--- a/Makefile
+++ b/Makefile
@@ -30,8 +30,25 @@ build-plugin:
 build-provisioner:
 	docker build -t csi-driver-lvm-provisioner . -f cmd/provisioner/Dockerfile
 
-.PHONY: test
-test: build-plugin build-provisioner
+/dev/loop%:
+	@fallocate --length 1G loop$*.img
+ifndef GITHUB_ACTIONS
+	@sudo mknod $@ b 7 $*
+endif
+	@sudo losetup $@ loop$*.img
+	@sudo losetup $@
+
+rm-loop%:
+	@sudo losetup -d /dev/loop$* || true
+	@! losetup /dev/loop$*
+	@sudo rm -f /dev/loop$*
+	@rm loop$*.img
+# If removing this loop device fails, you may need to:
+# sudo dmsetup info
+# sudo dmsetup remove
+
+.PHONY: kind
+kind:
 	@if ! which kind > /dev/null; then echo "kind needs to be installed"; exit 1; fi
 	@if ! kind get clusters | grep csi-driver-lvm > /dev/null; then \
 		kind create cluster \
@@ -40,15 +57,24 @@ test: build-plugin build-provisioner
 		--kubeconfig $(KUBECONFIG); fi
 	@kind --name csi-driver-lvm load docker-image csi-driver-lvm
 	@kind --name csi-driver-lvm load docker-image csi-driver-lvm-provisioner
+
+.PHONY: rm-kind
+rm-kind:
+	@kind delete cluster --name csi-driver-lvm
+
+RERUN ?= 1
+.PHONY: test
+test: build-plugin build-provisioner /dev/loop100 /dev/loop101 kind
 	@cd tests && docker build -t csi-bats . && cd -
+	@for i in {1..$(RERUN)}; do \
 	docker run -i$(DOCKER_TTY_ARG) \
 		-e HELM_REPO=$(HELM_REPO) \
 		-v "$(KUBECONFIG):/root/.kube/config" \
 		-v "$(PWD)/tests:/code" \
 		--network host \
 		csi-bats \
-		--verbose-run --trace --timing bats/test.bats
+		--verbose-run --trace --timing bats/test.bats ; \
+	done
 
 .PHONY: test-cleanup
-test-cleanup:
-	@kind delete cluster --name csi-driver-lvm
+test-cleanup: rm-loop100 rm-loop101 rm-kind
diff --git a/cmd/lvmplugin/main.go b/cmd/lvmplugin/main.go
index 402bc0b4..b762a21f 100644
--- a/cmd/lvmplugin/main.go
+++ b/cmd/lvmplugin/main.go
@@ -35,6 +35,7 @@ func init() {
 
 var (
 	endpoint          = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
+	hostWritePath     = flag.String("hostwritepath", "/etc/lvm", "host path where config, cache & backups will be written to")
 	driverName        = flag.String("drivername", "lvm.csi.metal-stack.io", "name of the driver")
 	nodeID            = flag.String("nodeid", "", "node id")
 	ephemeral         = flag.Bool("ephemeral", false, "publish volumes in ephemeral mode even if kubelet did not ask for it (only needed for Kubernetes 1.15)")
@@ -68,7 +69,7 @@ func main() {
 }
 
 func handle() {
-	driver, err := lvm.NewLvmDriver(*driverName, *nodeID, *endpoint, *ephemeral, *maxVolumesPerNode, version, *devicesPattern, *vgName, *namespace, *provisionerImage, *pullPolicy)
+	driver, err := lvm.NewLvmDriver(*driverName, *nodeID, *endpoint, *hostWritePath, *ephemeral, *maxVolumesPerNode, version, *devicesPattern, *vgName, *namespace, *provisionerImage, *pullPolicy)
 	if err != nil {
 		fmt.Printf("Failed to initialize driver: %s\n", err.Error())
 		os.Exit(1)
diff --git a/pkg/lvm/controllerserver.go b/pkg/lvm/controllerserver.go
index 178cf448..ccc9fee7 100644
--- a/pkg/lvm/controllerserver.go
+++ b/pkg/lvm/controllerserver.go
@@ -36,6 +36,7 @@ type controllerServer struct {
 	nodeID           string
 	devicesPattern   string
 	vgName           string
+	hostWritePath    string
 	kubeClient       kubernetes.Clientset
 	provisionerImage string
 	pullPolicy       v1.PullPolicy
@@ -43,7 +44,7 @@ type controllerServer struct {
 }
 
 // NewControllerServer
-func newControllerServer(ephemeral bool, nodeID string, devicesPattern string, vgName string, namespace string, provisionerImage string, pullPolicy v1.PullPolicy) (*controllerServer, error) {
+func newControllerServer(ephemeral bool, nodeID string, devicesPattern string, vgName string, hostWritePath string, namespace string, provisionerImage string, pullPolicy v1.PullPolicy) (*controllerServer, error) {
 	if ephemeral {
 		return &controllerServer{caps: getControllerServiceCapabilities(nil), nodeID: nodeID}, nil
 	}
@@ -69,6 +70,7 @@ func newControllerServer(ephemeral bool, nodeID string, devicesPattern string, v
 		}),
 		nodeID:           nodeID,
 		devicesPattern:   devicesPattern,
+		hostWritePath:    hostWritePath,
 		vgName:           vgName,
 		kubeClient:       *kubeClient,
 		namespace:        namespace,
@@ -137,6 +139,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		kubeClient:    cs.kubeClient,
 		namespace:     cs.namespace,
 		vgName:        cs.vgName,
+		hostWritePath: cs.hostWritePath,
 	}
 	if err := createProvisionerPod(ctx, va); err != nil {
 		klog.Errorf("error creating provisioner pod :%v", err)
@@ -197,6 +200,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
 		kubeClient:    cs.kubeClient,
 		namespace:     cs.namespace,
 		vgName:        cs.vgName,
+		hostWritePath: cs.hostWritePath,
 	}
 	if err := createProvisionerPod(ctx, va); err != nil {
 		klog.Errorf("error creating provisioner pod :%v", err)
diff --git a/pkg/lvm/lvm.go b/pkg/lvm/lvm.go
index 0cd9a18b..07072d00 100644
--- a/pkg/lvm/lvm.go
+++ b/pkg/lvm/lvm.go
@@ -45,6 +45,7 @@ type Lvm struct {
 	nodeID            string
 	version           string
 	endpoint          string
+	hostWritePath     string
 	ephemeral         bool
 	maxVolumesPerNode int64
 	devicesPattern    string
@@ -76,6 +77,7 @@ type volumeAction struct {
 	kubeClient    kubernetes.Clientset
 	namespace     string
 	vgName        string
+	hostWritePath string
 }
 
 const (
@@ -93,7 +95,7 @@ var (
 )
 
 // NewLvmDriver creates the driver
-func NewLvmDriver(driverName, nodeID, endpoint string, ephemeral bool, maxVolumesPerNode int64, version string, devicesPattern string, vgName string, namespace string, provisionerImage string, pullPolicy string) (*Lvm, error) {
+func NewLvmDriver(driverName, nodeID, endpoint string, hostWritePath string, ephemeral bool, maxVolumesPerNode int64, version string, devicesPattern string, vgName string, namespace string, provisionerImage string, pullPolicy string) (*Lvm, error) {
 	if driverName == "" {
 		return nil, fmt.Errorf("no driver name provided")
 	}
@@ -123,6 +125,7 @@
 		version:           vendorVersion,
 		nodeID:            nodeID,
 		endpoint:          endpoint,
+		hostWritePath:     hostWritePath,
 		ephemeral:         ephemeral,
 		maxVolumesPerNode: maxVolumesPerNode,
 		devicesPattern:    devicesPattern,
@@ -139,7 +142,7 @@ func (lvm *Lvm) Run() error {
 	// Create GRPC servers
 	lvm.ids = newIdentityServer(lvm.name, lvm.version)
 	lvm.ns = newNodeServer(lvm.nodeID, lvm.ephemeral, lvm.maxVolumesPerNode, lvm.devicesPattern, lvm.vgName)
-	lvm.cs, err = newControllerServer(lvm.ephemeral, lvm.nodeID, lvm.devicesPattern, lvm.vgName, lvm.namespace, lvm.provisionerImage, lvm.pullPolicy)
+	lvm.cs, err = newControllerServer(lvm.ephemeral, lvm.nodeID, lvm.devicesPattern, lvm.vgName, lvm.hostWritePath, lvm.namespace, lvm.provisionerImage, lvm.pullPolicy)
 	if err != nil {
 		return err
 	}
@@ -360,7 +363,7 @@ func createProvisionerPod(ctx context.Context, va volumeAction) (err error) {
 			Name: "lvmbackup",
 			VolumeSource: v1.VolumeSource{
 				HostPath: &v1.HostPathVolumeSource{
-					Path: "/etc/lvm/backup",
+					Path: filepath.Join(va.hostWritePath, "backup"),
 					Type: &hostPathType,
 				},
 			},
@@ -369,7 +372,7 @@ func createProvisionerPod(ctx context.Context, va volumeAction) (err error) {
 			Name: "lvmcache",
 			VolumeSource: v1.VolumeSource{
 				HostPath: &v1.HostPathVolumeSource{
-					Path: "/etc/lvm/cache",
+					Path: filepath.Join(va.hostWritePath, "cache"),
 					Type: &hostPathType,
 				},
 			},
@@ -378,7 +381,7 @@ func createProvisionerPod(ctx context.Context, va volumeAction) (err error) {
 			Name: "lvmlock",
 			VolumeSource: v1.VolumeSource{
 				HostPath: &v1.HostPathVolumeSource{
-					Path: "/run/lock/lvm",
+					Path: filepath.Join(va.hostWritePath, "lock"),
 					Type: &hostPathType,
 				},
 			},
diff --git a/tests/bats/test.bats b/tests/bats/test.bats
index e88057ba..6f4d6845 100644
--- a/tests/bats/test.bats
+++ b/tests/bats/test.bats
@@ -1,126 +1,115 @@
 #!/usr/bin/env bats -p
 
 @test "deploy csi-lvm-controller" {
-  run helm install --repo ${HELM_REPO} csi-driver-lvm csi-driver-lvm --values values.yaml --wait
+  run helm upgrade --install --repo ${HELM_REPO} csi-driver-lvm csi-driver-lvm --values values.yaml --wait --timeout=120s
   [ "$status" -eq 0 ]
 }
 
 @test "deploy inline pod with ephemeral volume" {
-  run sleep 10
-  run kubectl apply -f files/inline.yaml
+  run kubectl apply -f files/pod.inline.vol.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod/volume-test-inline created" ]
 }
 
 @test "inline pod running" {
-  run kubectl wait --for=condition=ready pod/volume-test-inline --timeout=180s
-  run kubectl get pods volume-test-inline -o jsonpath="{.metadata.name},{.status.phase}"
+  run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.inline.vol.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "volume-test-inline,Running" ]
 }
 
 @test "delete inline linear pod" {
-  run kubectl delete -f files/inline.yaml
+  run kubectl delete -f files/pod.inline.vol.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod \"volume-test-inline\" deleted" ]
 }
 
-@test "create pvc" {
-  run kubectl apply -f files/pvc.yaml
+@test "create pvc linear" {
+  run kubectl apply -f files/pvc.linear.yaml --wait --timeout=10s
+  [ "$status" -eq 0 ]
+
+  run kubectl wait --for=jsonpath='{.status.phase}'=Pending -f files/pvc.linear.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "persistentvolumeclaim/lvm-pvc-block created" ]
-  [ "${lines[1]}" = "persistentvolumeclaim/lvm-pvc-linear created" ]
 }
 
 @test "deploy linear pod" {
-  run kubectl apply -f files/linear.yaml
+  run kubectl apply -f files/pod.linear.vol.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod/volume-test created" ]
 }
 
-@test "deploy block pod" {
-  run kubectl apply -f files/block.yaml
+@test "linear pod running" {
+  run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.linear.vol.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod/volume-test-block created" ]
 }
 
-@test "linear pvc bound" {
-  run kubectl wait --for=condition=ready pod/volume-test --timeout=180s
-  run kubectl get pvc lvm-pvc-linear -o jsonpath="{.metadata.name},{.status.phase}"
+@test "pvc linear bound" {
+  run kubectl wait --for=jsonpath='{.status.phase}'=Bound -f files/pvc.linear.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "lvm-pvc-linear,Bound" ]
 }
 
-@test "linear pod running" {
-  run kubectl get pods volume-test -o jsonpath="{.metadata.name},{.status.phase}"
+@test "resize linear pvc" {
+  run kubectl apply -f files/pvc.linear.resize.yaml --wait --timeout=10s
+  [ "$status" -eq 0 ]
+
+  run kubectl wait --for=jsonpath='{.status.capacity.storage}'=20Mi -f files/pvc.linear.resize.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "volume-test,Running" ]
 }
 
-@test "block pvc bound" {
-  run kubectl wait --for=condition=ready pod/volume-test-block --timeout=180s
-  run kubectl get pvc lvm-pvc-block -o jsonpath="{.metadata.name},{.status.phase}"
+@test "create block pvc" {
+  run kubectl apply -f files/pvc.block.yaml --wait --timeout=10s
+  [ "$status" -eq 0 ]
+
+  run kubectl wait --for=jsonpath='{.status.phase}'=Pending -f files/pvc.block.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "lvm-pvc-block,Bound" ]
 }
 
-@test "block pod running" {
-  run kubectl get pods volume-test-block -o jsonpath="{.metadata.name},{.status.phase}"
+@test "deploy block pod" {
+  run kubectl apply -f files/pod.block.vol.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "volume-test-block,Running" ]
 }
 
-@test "resize volume requests" {
-  run kubectl apply -f files/pvc-resize.yaml
+@test "block pod running" {
+  run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.block.vol.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "persistentvolumeclaim/lvm-pvc-block configured" ]
-  [ "${lines[1]}" = "persistentvolumeclaim/lvm-pvc-linear configured" ]
 }
 
-@test "check new linear volume size" {
-  run sleep 90
-  run kubectl get pvc lvm-pvc-linear -o jsonpath='{.status.capacity.storage}'
+@test "pvc block bound" {
+  run kubectl wait --for=jsonpath='{.status.phase}'=Bound -f files/pvc.block.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "20Mi" ]
 }
 
-@test "check new block volume size" {
-  run kubectl get pvc lvm-pvc-block -o jsonpath='{.status.capacity.storage}'
+@test "resize block pvc" {
+  run kubectl apply -f files/pvc.block.resize.yaml --wait --timeout=10s
+  [ "$status" -eq 0 ]
+
+  run kubectl wait --for=jsonpath='{.status.capacity.storage}'=20Mi -f files/pvc.block.resize.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "20Mi" ]
 }
 
 @test "delete linear pod" {
-  run kubectl delete -f files/linear.yaml
+  run kubectl delete -f files/pod.linear.vol.yaml --wait --timeout=10s
+  [ "$status" -eq 0 ]
+}
+
+@test "delete resized linear pvc" {
+  run kubectl delete -f files/pvc.linear.resize.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod \"volume-test\" deleted" ]
 }
 
 @test "delete block pod" {
-  run kubectl delete -f files/block.yaml
+  run kubectl delete -f files/pod.block.vol.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod \"volume-test-block\" deleted" ]
 }
 
-@test "delete pvc" {
-  run kubectl delete -f files/pvc.yaml
+@test "delete resized block pvc" {
  run kubectl delete -f files/pvc.block.resize.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "persistentvolumeclaim \"lvm-pvc-block\" deleted" ]
-  [ "${lines[1]}" = "persistentvolumeclaim \"lvm-pvc-linear\" deleted" ]
 }
 
 @test "deploy inline xfs pod with ephemeral volume" {
-  run sleep 10
-  run kubectl apply -f files/xfs.yaml
+  run kubectl apply -f files/pod.inline.vol.xfs.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod/volume-test-inline-xfs created" ]
 }
 
 @test "inline xfs pod running" {
-  run kubectl wait --for=condition=ready pod/volume-test-inline-xfs --timeout=180s
-  run kubectl get pods volume-test-inline-xfs -o jsonpath="{.metadata.name},{.status.phase}"
+  run kubectl wait --for=jsonpath='{.status.phase}'=Running -f files/pod.inline.vol.xfs.yaml --timeout=10s
   [ "$status" -eq 0 ]
-  [ "$output" = "volume-test-inline-xfs,Running" ]
 }
 
 @test "check fsType" {
@@ -130,12 +119,14 @@
 }
 
 @test "delete inline xfs linear pod" {
-  run kubectl delete -f files/xfs.yaml
+  run kubectl delete -f files/pod.inline.vol.xfs.yaml --wait --timeout=10s
   [ "$status" -eq 0 ]
-  [ "${lines[0]}" = "pod \"volume-test-inline-xfs\" deleted" ]
 }
 
-@test "clean up " {
-  run sleep 60
-  run helm uninstall csi-driver-lvm
+@test "delete csi-lvm-controller" {
+  echo "⏳ Wait 10s for all PVCs to be cleaned up..." >&3
+  sleep 10
+
+  run helm uninstall csi-driver-lvm --wait --timeout=30s
+  [ "$status" -eq 0 ]
 }
diff --git a/tests/files/block.yaml b/tests/files/pod.block.vol.yaml
similarity index 100%
rename from tests/files/block.yaml
rename to tests/files/pod.block.vol.yaml
diff --git a/tests/files/xfs.yaml b/tests/files/pod.inline.vol.xfs.yaml
similarity index 100%
rename from tests/files/xfs.yaml
rename to tests/files/pod.inline.vol.xfs.yaml
diff --git a/tests/files/inline.yaml b/tests/files/pod.inline.vol.yaml
similarity index 100%
rename from tests/files/inline.yaml
rename to tests/files/pod.inline.vol.yaml
diff --git a/tests/files/linear.yaml b/tests/files/pod.linear.vol.yaml
similarity index 100%
rename from tests/files/linear.yaml
rename to tests/files/pod.linear.vol.yaml
diff --git a/tests/files/pvc-resize.yaml b/tests/files/pvc.block.resize.yaml
similarity index 51%
rename from tests/files/pvc-resize.yaml
rename to tests/files/pvc.block.resize.yaml
index 1d72decf..791188a7 100644
--- a/tests/files/pvc-resize.yaml
+++ b/tests/files/pvc.block.resize.yaml
@@ -10,15 +10,3 @@ spec:
   resources:
     requests:
       storage: 20Mi
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: lvm-pvc-linear
-spec:
-  accessModes:
-  - ReadWriteOnce
-  storageClassName: csi-driver-lvm-linear
-  resources:
-    requests:
-      storage: 20Mi
diff --git a/tests/files/pvc.yaml b/tests/files/pvc.block.yaml
similarity index 51%
rename from tests/files/pvc.yaml
rename to tests/files/pvc.block.yaml
index 73e72535..c2a4d5c1 100644
--- a/tests/files/pvc.yaml
+++ b/tests/files/pvc.block.yaml
@@ -10,15 +10,3 @@ spec:
   resources:
     requests:
       storage: 10Mi
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: lvm-pvc-linear
-spec:
-  accessModes:
-  - ReadWriteOnce
-  storageClassName: csi-driver-lvm-linear
-  resources:
-    requests:
-      storage: 10Mi
diff --git a/tests/files/pvc.linear.resize.yaml b/tests/files/pvc.linear.resize.yaml
new file mode 100644
index 00000000..f118e8e4
--- /dev/null
+++ b/tests/files/pvc.linear.resize.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: lvm-pvc-linear
+spec:
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: csi-driver-lvm-linear
+  resources:
+    requests:
+      storage: 20Mi
diff --git a/tests/files/pvc.linear.yaml b/tests/files/pvc.linear.yaml
new file mode 100644
index 00000000..5a5296f5
--- /dev/null
+++ b/tests/files/pvc.linear.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: lvm-pvc-linear
+spec:
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: csi-driver-lvm-linear
+  resources:
+    requests:
+      storage: 10Mi