test(robot): improve data verification for large volume data
longhorn/longhorn#8355

Signed-off-by: Roger Yao <roger.yao@suse.com>
roger-ryao committed Sep 26, 2024
1 parent 53d6ca4 commit 3e47b36
Showing 4 changed files with 80 additions and 19 deletions.
6 changes: 3 additions & 3 deletions e2e/keywords/workload.resource
@@ -31,9 +31,9 @@ Write ${size} MB data to file ${file_name} in pod ${pod_id}
    ${pod_name} =    generate_name_with_suffix    pod    ${pod_id}
    write_workload_pod_random_data    ${pod_name}    ${size}    ${file_name}

Write ${size} GB data to file ${file_name} in pod ${pod_id}
    ${size_in_mb} =    Convert GB to MB    ${size}
    Write ${size_in_mb} MB data to file ${file_name} in pod ${pod_id}
Write ${size} GB large data to file ${file_name} in pod ${pod_id}
    ${pod_name} =    generate_name_with_suffix    pod    ${pod_id}
    write_workload_pod_large_data    ${pod_name}    ${size}    ${file_name}

Power off volume node of ${workload_kind} ${workload_id} for ${duration} minutes
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
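The keyword change above replaces the old GB-to-MB conversion loop with a single call into a dedicated large-data writer. For intuition, a minimal local sketch of the two strategies in Python, outside Kubernetes; the dd-based random writer is an assumption (write_pod_random_data's body is not part of this diff), while the fallocate call mirrors the one added in e2e/libs/workload/workload.py below:

import subprocess

def write_random_mb(path: str, size_in_mb: int) -> None:
    # Old-style strategy: stream random bytes into the file. Wall-clock cost
    # grows linearly with size, which is impractical at 150-210 GB.
    subprocess.run(
        ["dd", "if=/dev/urandom", f"of={path}", "bs=1M", f"count={size_in_mb}"],
        check=True)

def allocate_large_gb(path: str, size_in_gb: int) -> None:
    # New-style strategy: fallocate asks the filesystem to reserve extents
    # without writing them, so even a 210 GB file is created almost instantly.
    subprocess.run(["fallocate", "-l", f"{size_in_gb}G", path], check=True)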
12 changes: 12 additions & 0 deletions e2e/libs/keywords/workload_keywords.py
@@ -19,6 +19,7 @@
from workload.workload import is_workload_pods_has_annotations
from workload.workload import keep_writing_pod_data
from workload.workload import write_pod_random_data
from workload.workload import write_pod_large_data
from workload.workload import wait_for_workload_pods_running
from workload.workload import wait_for_workload_pods_stable
from workload.workload import wait_for_workload_pod_kept_in_state
@@ -87,6 +88,17 @@ def write_workload_pod_random_data(self, workload_name, size_in_mb, file_name):
        volume_name = get_volume_name_by_pod(pod_name)
        self.volume.set_annotation(volume_name, ANNOT_CHECKSUM, checksum)

    def write_workload_pod_large_data(self, workload_name, size_in_gb, file_name):
        pod_name = get_workload_pod_names(workload_name)[0]

        logging(f'Writing {size_in_gb} GB large data to pod {pod_name} file {file_name}')
        checksum = write_pod_large_data(pod_name, size_in_gb, file_name)

        logging(f"Storing pod {pod_name} file {file_name} checksum = {checksum}")

        volume_name = get_volume_name_by_pod(pod_name)
        self.volume.set_annotation(volume_name, ANNOT_CHECKSUM, checksum)

    def get_workload_pod_data_checksum(self, workload_name, file_name):
        pod_name = get_workload_pod_names(workload_name)[0]
        return get_pod_data_checksum(pod_name, file_name)
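write_workload_pod_large_data stores the md5 of the written file as a volume annotation, and get_workload_pod_data_checksum recomputes it later so the two can be compared after a backup is restored. A self-contained sketch of that round-trip, with hashlib standing in for the in-pod md5sum and a plain dict standing in for the annotation store (the helper names here are illustrative, not the repo's API):

import hashlib

def md5_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    # Hash in 1 MiB chunks so even a multi-GB file needs constant memory.
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

annotations = {}  # stand-in for the Longhorn volume annotation store

def store_checksum(volume_name: str, path: str) -> None:
    annotations[volume_name] = md5_of_file(path)  # done at write time

def verify_checksum(volume_name: str, path: str) -> bool:
    return annotations[volume_name] == md5_of_file(path)  # done at verify time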
27 changes: 27 additions & 0 deletions e2e/libs/workload/workload.py
@@ -102,6 +102,33 @@ def write_pod_random_data(pod_name, size_in_mb, file_name,
        time.sleep(retry_interval)


def write_pod_large_data(pod_name, size_in_gb, file_name,
                         data_directory="/data"):

    wait_for_pod_status(pod_name, "Running")

    retry_count, retry_interval = get_retry_count_and_interval()

    for _ in range(retry_count):
        try:
            data_path = f"{data_directory}/{file_name}"
            api = client.CoreV1Api()
            write_data_cmd = [
                '/bin/sh',
                '-c',
                f"fallocate -l {size_in_gb}G {data_path};\
                sync;\
                md5sum {data_path} | awk '{{print $1}}'"
            ]
            return stream(
                api.connect_get_namespaced_pod_exec, pod_name, 'default',
                command=write_data_cmd, stderr=True, stdin=False, stdout=True,
                tty=False)
        except Exception as e:
            logging(f"Writing large data to pod {pod_name} failed with error {e}")
            time.sleep(retry_interval)


def keep_writing_pod_data(pod_name, size_in_mb=256, path="/data/overwritten-data"):

    wait_for_pod_status(pod_name, "Running")
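Two details of write_pod_large_data are worth noting. First, fallocate reserves extents rather than writing data, so creating the file is near-instant and its content reads back as zeros, giving a deterministic md5. Second, the kubernetes stream helper returns the command's stdout, so the function returns whatever the md5sum | awk pipeline printed. A minimal standalone sketch of that exec pattern (pod name and namespace are placeholders; exec output usually carries a trailing newline worth stripping before comparison):

from kubernetes import client, config
from kubernetes.stream import stream

def exec_in_pod(pod_name: str, shell_cmd: str, namespace: str = "default") -> str:
    config.load_kube_config()  # use load_incluster_config() inside a cluster
    api = client.CoreV1Api()
    out = stream(
        api.connect_get_namespaced_pod_exec, pod_name, namespace,
        command=["/bin/sh", "-c", shell_cmd],
        stderr=True, stdin=False, stdout=True, tty=False)
    return out.strip()

# e.g. checksum = exec_in_pod("test-pod-0", "md5sum /data/0 | awk '{print $1}'")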
54 changes: 38 additions & 16 deletions e2e/tests/test_cases/test_backup_listing.robot
@@ -110,19 +110,37 @@ Write data to file in deployment 0
    Write 100 MB data to file data in deployment 0

Write 210 GB data in pod 2
    FOR    ${i}    IN RANGE    21
        Write 10 GB data to file ${i} in pod 2
    END
    Write 210 GB large data to file 0 in pod 2

Write 150 GB data in pod 4
    Write 150 GB large data to file 0 in pod 4

Write 40 GB data in pod 4
    Write 40 GB large data to file 1 in pod 4

Create pod ${pod_id} from volume ${source_volume_id} ${source_volume_size} GB volume backup ${backup_id}
    ${source_volume_name}=    generate_name_with_suffix    volume    ${source_volume_id}
    ${backup_url}=    get_backup_url    ${backup_id}    ${source_volume_name}
    ${volume_name}=    generate_name_with_suffix    volume    ${pod_id}
    create_volume    ${volume_name}    size=${source_volume_size}Gi    numberOfReplicas=3    fromBackup=${backup_url}    dataEngine=${DATA_ENGINE}
    Create persistentvolume for volume ${pod_id}
    Create persistentvolumeclaim for volume ${pod_id}
    Create pod ${pod_id} using volume ${pod_id}
    Wait for pod ${pod_id} running

Write 150 GB data in pod 3
    FOR    ${i}    IN RANGE    15
        Write 10 GB data to file ${i} in pod 3
    END

Delete pod ${pod_id} and volume ${volume_id}
    Delete pod ${pod_id}
    Wait for volume ${volume_id} detached
    Delete volume ${volume_id}

Write 40 GB data in pod 3
    FOR    ${i}    IN RANGE    15    19
        Write 10 GB data to file ${i} in pod 3
    END

Pod ${pod_id} data should be same as volume ${source_volume_id} backup ${backup_id}
    ${source_volume_name}=    generate_name_with_suffix    volume    ${source_volume_id}
    ${backup_name}=    get_backup_name    ${backup_id}    ${source_volume_name}
    ${expected_checksum}=    get_restored_checksum    ${backup_name}
    ${pod_name} =    generate_name_with_suffix    pod    ${pod_id}
    ${current_checksum}=    get_workload_pod_data_checksum    ${pod_name}    0
    Should Be Equal    ${expected_checksum}    ${current_checksum}
    ...    msg="expected ${expected_checksum}, got ${current_checksum}!"
*** Test Cases ***
Backup listing with more than 1000 backups
@@ -151,8 +169,12 @@ Backup listing of volume bigger than 200 Gi
    Then Create pod 2 mount 250 GB volume 2
    And Write 210 GB data in pod 2
    Then Volume 2 backup 0 should be able to create
    Then Create pod 3 mount 200 GB volume 3
    And Write 150 GB data in pod 3
    Then Volume 3 backup 0 should be able to create
    And Write 40 GB data in pod 3
    Then Volume 3 backup 1 should be able to create
    Then Delete pod 2 and volume 2
    Then Create pod 3 from volume 2 250 GB volume backup 0
    And Pod 3 data should be same as volume 2 backup 0
    And Delete pod 3 and volume 3
    Then Create pod 4 mount 200 GB volume 4
    And Write 150 GB data in pod 4
    Then Volume 4 backup 0 should be able to create
    And Write 40 GB data in pod 4
    Then Volume 4 backup 1 should be able to create

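The reworked test case now performs an end-to-end restore check: back up volume 2, delete its pod, restore the backup into pod 3, and compare checksums via "Pod 3 data should be same as volume 2 backup 0". A toy, filesystem-only analogue of that verification loop, with file copies standing in for Longhorn backup and restore (this sketch does not touch Longhorn at all):

import hashlib
import os
import shutil
import tempfile

def md5(path):
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

with tempfile.TemporaryDirectory() as tmp:
    source = os.path.join(tmp, "source-volume-file")
    backup = os.path.join(tmp, "backup-copy")
    restored = os.path.join(tmp, "restored-volume-file")

    with open(source, "wb") as f:        # "write data" (tiny here; the real
        f.write(os.urandom(1 << 20))     # test allocates up to 210 GB)
    expected = md5(source)               # checksum recorded at write time

    shutil.copy(source, backup)          # stand-in for "Volume 2 backup 0"
    os.remove(source)                    # stand-in for "Delete pod 2 and volume 2"
    shutil.copy(backup, restored)        # stand-in for restore into pod 3

    assert md5(restored) == expected, "restored data differs from source"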