diff --git a/e2e/keywords/workload.resource b/e2e/keywords/workload.resource
index b3bc191534..d8363df38d 100644
--- a/e2e/keywords/workload.resource
+++ b/e2e/keywords/workload.resource
@@ -181,3 +181,14 @@ Check ${workload_kind} ${workload_id} pod is ${expect_state} on another node
 Delete Longhorn ${workload_kind} ${workload_name} pod on node ${node_id}
     ${node_name} =    get_node_by_index    ${node_id}
     delete_workload_pod_on_node    ${workload_name}    ${node_name}    longhorn-system
+
+Trim ${workload_kind} ${workload_id} volume should ${condition}
+    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
+
+    IF    $condition == "fail"
+        trim_workload_volume_filesystem    ${workload_name}    is_expect_fail=True
+    ELSE IF    $condition == "pass"
+        trim_workload_volume_filesystem    ${workload_name}    is_expect_fail=False
+    ELSE
+        Fail    "Invalid condition value: ${condition}"
+    END
diff --git a/e2e/libs/keywords/workload_keywords.py b/e2e/libs/keywords/workload_keywords.py
index 507a7d0afb..9461fb8e6f 100644
--- a/e2e/libs/keywords/workload_keywords.py
+++ b/e2e/libs/keywords/workload_keywords.py
@@ -170,3 +170,7 @@ def is_workloads_pods_has_annotations(self, workload_names, annotation_key, name
             if not is_workload_pods_has_annotations(workload_name, annotation_key, namespace=namespace, label_selector=label_selector):
                 return False
         return True
+
+    def trim_workload_volume_filesystem(self, workload_name, is_expect_fail=False):
+        volume_name = get_workload_volume_name(workload_name)
+        self.volume.trim_filesystem(volume_name, is_expect_fail=is_expect_fail)
diff --git a/e2e/libs/volume/crd.py b/e2e/libs/volume/crd.py
index ec89be8f77..ed5a21d8fd 100644
--- a/e2e/libs/volume/crd.py
+++ b/e2e/libs/volume/crd.py
@@ -495,3 +495,6 @@ def upgrade_engine_image(self, volume_name, engine_image_name):
 
     def wait_for_engine_image_upgrade_completed(self, volume_name, engine_image_name):
         return Rest(self.node_exec).wait_for_engine_image_upgrade_completed(volume_name, engine_image_name)
+
+    def trim_filesystem(self, volume_name, is_expect_fail=False):
+        return Rest(self.node_exec).trim_filesystem(volume_name, is_expect_fail=is_expect_fail)
diff --git a/e2e/libs/volume/rest.py b/e2e/libs/volume/rest.py
index 4ec867df15..306e1af998 100644
--- a/e2e/libs/volume/rest.py
+++ b/e2e/libs/volume/rest.py
@@ -348,3 +348,20 @@ def wait_for_replica_ready_to_rw(self, volume_name):
                 break
             time.sleep(self.retry_interval)
         assert ready, f"Failed to get volume {volume_name} replicas ready: {replicas}"
+
+    def trim_filesystem(self, volume_name, is_expect_fail=False):
+        is_unexpected_pass = False
+        try:
+            self.get(volume_name).trimFilesystem(name=volume_name)
+
+            if is_expect_fail:
+                is_unexpected_pass = True
+
+        except Exception as e:
+            if is_expect_fail:
+                logging(f"Failed to trim filesystem: {e}")
+            else:
+                raise e
+
+        if is_unexpected_pass:
+            raise Exception(f"Expected volume {volume_name} trim filesystem to fail")
diff --git a/e2e/libs/volume/volume.py b/e2e/libs/volume/volume.py
index 7d8fcec34b..8c9144ee7e 100644
--- a/e2e/libs/volume/volume.py
+++ b/e2e/libs/volume/volume.py
@@ -151,3 +151,6 @@ def upgrade_engine_image(self, volume_name, engine_image_name):
 
     def wait_for_engine_image_upgrade_completed(self, volume_name, engine_image_name):
         return self.volume.wait_for_engine_image_upgrade_completed(volume_name, engine_image_name)
+
+    def trim_filesystem(self, volume_name, is_expect_fail=False):
+        return self.volume.trim_filesystem(volume_name, is_expect_fail=is_expect_fail)
diff --git a/e2e/tests/regression/test_v2.robot b/e2e/tests/regression/test_v2.robot
index 27c3831665..137d7eb7c3 100644
--- a/e2e/tests/regression/test_v2.robot
+++ b/e2e/tests/regression/test_v2.robot
@@ -11,6 +11,8 @@ Resource    ../keywords/workload.resource
 Resource    ../keywords/volume.resource
 Resource    ../keywords/setting.resource
 Resource    ../keywords/node.resource
+Resource    ../keywords/host.resource
+Resource    ../keywords/longhorn.resource
 
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
@@ -50,3 +52,23 @@ Degraded Volume Replica Rebuilding
         And Wait for deployment 0 pods stable
         Then Check deployment 0 data in file data.txt is intact
     END
+
+V2 Volume Should Block Trim When Volume Is Degraded
+    Given Set setting auto-salvage to true
+    And Create storageclass longhorn-test with dataEngine=v2
+    And Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+
+    FOR    ${i}    IN RANGE    ${LOOP_COUNT}
+        And Keep writing data to pod of deployment 0
+
+        When Restart cluster
+        And Wait for longhorn ready
+        And Wait for volume of deployment 0 attached and degraded
+        Then Trim deployment 0 volume should fail
+
+        When Wait for workloads pods stable
+        ...    deployment 0
+        And Check deployment 0 works
+        Then Trim deployment 0 volume should pass
+    END
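
Note for reviewers: the new Rest.trim_filesystem follows an expect-fail pattern (swallow the error only when failure is expected, and turn an unexpected success into a hard error). Below is a minimal standalone sketch of that control flow; run_expecting_failure and degraded_trim are hypothetical names used only for illustration, and the real call site is self.get(volume_name).trimFilesystem(name=volume_name) as shown in the hunk above.

    # Sketch of the expect-fail pattern used by Rest.trim_filesystem.
    def run_expecting_failure(action, is_expect_fail=False):
        is_unexpected_pass = False
        try:
            action()
            # The action succeeded; remember that if a failure was expected.
            if is_expect_fail:
                is_unexpected_pass = True
        except Exception as e:
            if is_expect_fail:
                # Expected failure: log and swallow.
                print(f"Failed as expected: {e}")
            else:
                # Unexpected failure: propagate.
                raise
        if is_unexpected_pass:
            raise Exception("Expected the action to fail, but it succeeded")

    # Usage sketch: trimming a degraded v2 volume should raise, so the
    # wrapper passes; trimming a healthy volume succeeds, so it also passes.
    def degraded_trim():
        raise RuntimeError("volume is degraded, fstrim is blocked")

    run_expecting_failure(degraded_trim, is_expect_fail=True)
    run_expecting_failure(lambda: None, is_expect_fail=False)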