Skip to content

Commit

Permalink
test(negative): implement delete node
Browse files Browse the repository at this point in the history
Signed-off-by: Yang Chiu <yang.chiu@suse.com>
  • Loading branch information
yangchiu committed Mar 22, 2024
1 parent 51ef717 commit 4e244c1
Show file tree
Hide file tree
Showing 22 changed files with 231 additions and 51 deletions.
3 changes: 3 additions & 0 deletions e2e/keywords/common.resource
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,6 @@ Cleanup test resources
cleanup_persistentvolumeclaims
cleanup_volumes
cleanup_storageclasses

Set ${setting_name} to ${setting_value}
    [Documentation]    Update the setting named ${setting_name} to
    ...                ${setting_value} via the change_setting library keyword.
    change_setting    ${setting_name}    ${setting_value}
10 changes: 10 additions & 0 deletions e2e/keywords/deployment.resource
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,21 @@ Library ../libs/keywords/common_keywords.py
Library ../libs/keywords/deployment_keywords.py

*** Keywords ***
#TODO
# these functions can be merged into workload.resource
Create deployment ${deployment_id} with persistentvolumeclaim ${claim_id}
    [Documentation]    Create a deployment (name derived from ${deployment_id})
    ...                that mounts the persistentvolumeclaim derived from ${claim_id}.
    ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
    ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
    create_deployment    ${deployment_name}    ${claim_name}

Write ${size} MB data to file ${file_name} in deployment ${deployment_id}
    [Documentation]    Write ${size} MB of random data into ${file_name}
    ...                inside the pod of the deployment derived from ${deployment_id}.
    ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
    write_workload_pod_random_data    ${deployment_name}    ${size}    ${file_name}

Check deployment ${deployment_id} data in file ${file_name} is intact
    [Documentation]    Verify the checksum of ${file_name} in the deployment's
    ...                pod still matches the value recorded when it was written.
    ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
    check_workload_pod_data_checksum    ${deployment_name}    ${file_name}

Check deployment ${deployment_id} works
${deployment_name} = generate_name_with_suffix deployment ${deployment_id}
write_workload_pod_random_data ${deployment_name} 1024 random-data
Expand Down
32 changes: 32 additions & 0 deletions e2e/keywords/k8s.resource
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
*** Settings ***
Documentation K8s keywords
Library ../libs/keywords/k8s_keywords.py
Library ../libs/keywords/workload_keywords.py
Library ../libs/keywords/volume_keywords.py
Library ../libs/keywords/host_keywords.py

*** Variables ***


*** Keywords ***
Stop volume node kubelet of ${workload_kind} ${workload_id} for ${duration} seconds
    [Documentation]    Restart the kubelet on the node the workload's volume is
    ...                attached to, keeping it stopped for ${duration} seconds.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    ${node_name} =    get_volume_node    ${volume_name}
    # BUG FIX: restart_kubelet expects a node name; the original passed
    # ${workload_name}, leaving the computed ${node_name} unused.
    restart_kubelet    ${node_name}    ${duration}

Delete volume of ${workload_kind} ${workload_id} volume node
    [Documentation]    Delete the k8s node the workload's volume is attached to
    ...                and remember it in test variable ${deleted_node} so
    ...                "Add deleted node back" can restore it later.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    ${deleted_node} =    delete_volume_node    ${volume_name}
    Set Test Variable    ${deleted_node}

Delete volume of ${workload_kind} ${workload_id} replica node
    [Documentation]    Delete a k8s node hosting a replica of the workload's
    ...                volume and remember it in test variable ${deleted_node}.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    ${deleted_node} =    delete_replica_node    ${volume_name}
    Set Test Variable    ${deleted_node}

Add deleted node back
    # Presumably reboot_node_by_name power-cycles the host so the deleted k8s
    # node re-registers with the cluster — TODO confirm against host_keywords.
    # Requires ${deleted_node} set by a previous "Delete volume of ..." keyword.
    reboot_node_by_name    ${deleted_node}
2 changes: 2 additions & 0 deletions e2e/keywords/persistentvolumeclaim.resource
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ Library ../libs/keywords/common_keywords.py
Library ../libs/keywords/persistentvolumeclaim_keywords.py

*** Keywords ***
#TODO
# these functions can be merged into workload.resource
Create persistentvolumeclaim ${claim_id} using ${volume_type} volume
    [Documentation]    Create a persistentvolumeclaim (name derived from
    ...                ${claim_id}) backed by a ${volume_type} volume.
    ${claim_name} =    generate_name_with_suffix    claim    ${claim_id}
    create_persistentvolumeclaim    ${claim_name}    ${volume_type}
Expand Down
2 changes: 2 additions & 0 deletions e2e/keywords/statefulset.resource
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ Library ../libs/keywords/common_keywords.py
Library ../libs/keywords/statefulset_keywords.py

*** Keywords ***
#TODO
# these functions can be merged into workload.resource
Create statefulset ${statefulset_id} using ${volume_type} volume
    [Documentation]    Create a statefulset (name derived from
    ...                ${statefulset_id}) backed by a ${volume_type} volume.
    ${statefulset_name} =    generate_name_with_suffix    statefulset    ${statefulset_id}
    create_statefulset    ${statefulset_name}    ${volume_type}
Expand Down
27 changes: 23 additions & 4 deletions e2e/keywords/workload.resource
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,16 @@ Documentation Workload Keywords
Library Collections
Library ../libs/keywords/common_keywords.py
Library ../libs/keywords/volume_keywords.py
Library ../libs/keywords/workload_keywords.py

*** Keywords ***
Keep writing data to pod of ${workload_kind} ${workload_id}
    [Documentation]    Start continuously writing data to the workload's pod,
    ...                to generate I/O while a disruption is injected.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    keep_writing_workload_pod_data    ${workload_name}

#TODO
# move node related keywords out of workload.resource
Power off volume node of ${workload_kind} ${workload_id} for ${duration} minutes
${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id}
reboot_workload_volume_node ${workload_name} ${duration}
Expand All @@ -18,10 +21,26 @@ Reboot volume node of ${workload_kind} ${workload_id}
${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id}
reboot_workload_volume_node ${workload_name}

# NOTE(review): the leading "When" is a BDD prefix that belongs at the call
# site, not in the keyword definition — Robot strips Given/When/Then from
# calls automatically. Dropping it would also collide with the keyword of the
# same name in k8s.resource; confirm which definition suites should import.
When Stop volume node kubelet of ${workload_kind} ${workload_id} for ${duration} seconds
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    restart_workload_kubelet    ${workload_name}    ${duration}

Wait for volume of ${workload_kind} ${workload_id} healthy
    [Documentation]    Block until the volume backing the workload reports
    ...                healthy.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    wait_for_workload_volume_healthy    ${workload_name}

# NOTE(review): keyword name says rebuilding "started" (and typos
# "rebuidling"), but the implementation calls
# wait_for_replica_rebuilding_to_complete_on_node, i.e. waits for COMPLETION.
# Confirm intent before renaming — callers match the embedded-argument name.
Wait until volume of ${workload_kind} ${workload_id} replica rebuidling started on ${replica_locality}
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    wait_for_replica_rebuilding_to_complete_on_node    ${volume_name}    ${replica_locality}

Wait for volume of ${workload_kind} ${workload_id} attached and unknown
    [Documentation]    Block until the workload's volume is attached and its
    ...                robustness is reported as unknown.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    wait_for_volume_unknown    ${volume_name}

Wait for volume of ${workload_kind} ${workload_id} attached and healthy
    [Documentation]    Block until the workload's volume is attached and its
    ...                robustness is reported as healthy.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    wait_for_volume_healthy    ${volume_name}

Delete replica of ${workload_kind} ${workload_id} volume on ${replica_locality}
    [Documentation]    Delete the replica of the workload's volume located on
    ...                the node described by ${replica_locality}.
    ${workload_name} =    generate_name_with_suffix    ${workload_kind}    ${workload_id}
    ${volume_name} =    get_workload_volume_name    ${workload_name}
    delete_replica_on_node    ${volume_name}    ${replica_locality}
10 changes: 8 additions & 2 deletions e2e/libs/kubelet/kubelet.py → e2e/libs/k8s/k8s.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import time

from workload.constant import IMAGE_UBUNTU
import subprocess
from workload.pod import create_pod
from workload.pod import delete_pod
from workload.pod import new_pod_manifest
from workload.constant import IMAGE_UBUNTU

from utility.utility import logging

def restart_kubelet(node_name, downtime_in_sec=10):
manifest = new_pod_manifest(
Expand All @@ -19,3 +20,8 @@ def restart_kubelet(node_name, downtime_in_sec=10):
time.sleep(downtime_in_sec)

delete_pod(pod_name)

def delete_node(node_name):
    """Remove a node from the cluster with `kubectl delete node`.

    Blocks until kubectl returns; raises subprocess.CalledProcessError
    if the command exits non-zero.
    """
    exec_cmd = ["kubectl", "delete", "node", node_name]
    # text=True so the log line shows readable output instead of a bytes repr.
    res = subprocess.check_output(exec_cmd, text=True)
    logging(f"Executed command {exec_cmd} with result {res}")
3 changes: 3 additions & 0 deletions e2e/libs/keywords/common_keywords.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@ class common_keywords:
def __init__(self):
    # No state to initialize; the class only groups stateless keyword methods.
    pass

def change_setting(self, setting_name, setting_value):
    """Change the setting ``setting_name`` to ``setting_value``.

    Not implemented yet. Raise NotImplementedError instead of returning
    the ``NotImplemented`` sentinel: the sentinel is meant for binary
    dunder methods, and a Robot keyword receiving it would silently
    treat the no-op as success.
    """
    raise NotImplementedError(
        f"change_setting({setting_name!r}, {setting_value!r}) is not implemented yet")

def init_k8s_api_client(self):
init_k8s_api_client()

Expand Down
4 changes: 4 additions & 0 deletions e2e/libs/keywords/host_keywords.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@
class host_keywords:

def __init__(self):
#TODO
# call BuiltIn().get_library_instance() in keyword init function
# could fail because the keyword instance could not be created yet
# whether it will fail or not will depend on import orders.
self.volume_keywords = BuiltIn().get_library_instance('volume_keywords')

self.host = Host()
Expand Down
21 changes: 21 additions & 0 deletions e2e/libs/keywords/k8s_keywords.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from robot.libraries.BuiltIn import BuiltIn
from k8s.k8s import restart_kubelet
from k8s.k8s import delete_node


class k8s_keywords:
    """Robot keyword library for cluster-level k8s operations."""

    def restart_kubelet(self, node_name, stop_time_in_sec):
        # Robot passes keyword arguments as strings, so coerce the duration.
        restart_kubelet(node_name, int(stop_time_in_sec))

    def delete_volume_node(self, volume_name):
        """Delete the node the volume is attached to and return its name."""
        volume_kw = BuiltIn().get_library_instance('volume_keywords')
        node = volume_kw.get_volume_node(volume_name)
        delete_node(node)
        return node

    def delete_replica_node(self, volume_name):
        """Delete a node hosting a replica of the volume and return its name."""
        volume_kw = BuiltIn().get_library_instance('volume_keywords')
        node = volume_kw.get_replica_node(volume_name)
        delete_node(node)
        return node
10 changes: 0 additions & 10 deletions e2e/libs/keywords/kubelet_keywords.py

This file was deleted.

12 changes: 12 additions & 0 deletions e2e/libs/keywords/volume_keywords.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,12 @@ def get_replica_node_ids(self, volume_name):
node_ids.extend(self.get_node_ids_by_replica_locality(volume_name, "test pod node"))
return node_ids

def get_replica_node(self, volume_name):
    """Return the id of a node that hosts a replica of the volume."""
    locality = "replica node"
    return self.get_node_id_by_replica_locality(volume_name, locality)

def get_volume_node(self, volume_name):
    """Return the id of the node the volume engine is attached to."""
    locality = "volume node"
    return self.get_node_id_by_replica_locality(volume_name, locality)

def get_node_id_by_replica_locality(self, volume_name, replica_locality):
    """Return the first node id matching ``replica_locality``.

    Fails with a descriptive AssertionError when no node matches,
    instead of the opaque IndexError a bare ``[0]`` would raise.
    """
    node_ids = self.get_node_ids_by_replica_locality(volume_name, replica_locality)
    assert node_ids, \
        f"no node found for replica locality {replica_locality} of volume {volume_name}"
    return node_ids[0]

Expand Down Expand Up @@ -163,3 +169,9 @@ def wait_for_volume_detached(self, volume_name):
def wait_for_volume_healthy(self, volume_name):
    # Log first so a stalled wait is visible in the Robot log,
    # then block until the volume reports healthy.
    logging(f'Waiting for volume {volume_name} to be healthy')
    self.volume.wait_for_volume_healthy(volume_name)

def wait_for_volume_degraded(self, volume_name):
    """Block until the volume reports degraded robustness."""
    # Log like the sibling wait_for_volume_* keywords (e.g.
    # wait_for_volume_healthy) so stalls are visible in the Robot log.
    logging(f'Waiting for volume {volume_name} to be degraded')
    self.volume.wait_for_volume_degraded(volume_name)

def wait_for_volume_unknown(self, volume_name):
    """Block until the volume reports unknown robustness."""
    # Log like the sibling wait_for_volume_* keywords (e.g.
    # wait_for_volume_healthy) so stalls are visible in the Robot log.
    logging(f'Waiting for volume {volume_name} to be unknown')
    self.volume.wait_for_volume_unknown(volume_name)
6 changes: 3 additions & 3 deletions e2e/libs/keywords/workload_keywords.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import multiprocessing

from deployment_keywords import deployment_keywords
from kubelet_keywords import kubelet_keywords
from k8s_keywords import k8s_keywords
from host_keywords import host_keywords
from statefulset_keywords import statefulset_keywords
from volume_keywords import volume_keywords
Expand Down Expand Up @@ -32,7 +32,7 @@ class workload_keywords:

def __init__(self):
self.deployment_keywords = deployment_keywords()
self.kubelet_keywords = kubelet_keywords()
self.k8s_keywords = k8s_keywords()
self.host_keywords = host_keywords()
self.statefulset_keywords = statefulset_keywords()
self.volume_keywords = volume_keywords()
Expand Down Expand Up @@ -92,7 +92,7 @@ def reboot_workload_volume_node(self, workload_name, downtime_in_min=1):
def restart_workload_kubelet(self, workload_name, downtime_in_sec):
volume_name = get_workload_volume_name(workload_name)
node_id = self.volume_keywords.get_node_id_by_replica_locality(volume_name, "volume node")
self.kubelet_keywords.restart_kubelet(node_id, downtime_in_sec)
self.k8s_keywords.restart_kubelet(node_id, downtime_in_sec)

def wait_for_workload_pods_running(self, workload_name, namespace="default"):
logging(f'Waiting for {namespace} workload {workload_name} pods running')
Expand Down
4 changes: 3 additions & 1 deletion e2e/libs/node/node.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@

from utility.utility import get_retry_count_and_interval


#TODO
# it seems that Node not need to be a class
# it's just a collection of utility functions
class Node:

def __init__(self):
Expand Down
31 changes: 12 additions & 19 deletions e2e/libs/volume/crd.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,24 +264,12 @@ def write_random_data(self, volume_name, size):
node_name = self.get(volume_name)["spec"]["nodeID"]
endpoint = self.get_endpoint(volume_name)

dd_command = f"dd if=/dev/urandom of={endpoint} bs=1M count={size} status=none"
dd_command_output = self.node_exec.issue_cmd(node_name, dd_command).strip()

try:
assert dd_command_output == ""
except AssertionError:
logging(f"Failed to write random data to volume {volume_name}. \n"
f"Command: {dd_command}\n"
f"Output: {dd_command_output}")

logging(f"Pausing the test for {self.retry_count} seconds ...")
time.sleep(self.retry_count)

assert dd_command_output == "", \
f"Failed to write random data to volume {volume_name}.\n" \
f"Command: {dd_command}\n" \
f"Output: {dd_command_output}"
return self.node_exec.issue_cmd(node_name, f"md5sum {endpoint} | awk \'{{print $1}}\'")
checksum = self.node_exec.issue_cmd(
node_name,
f"dd if=/dev/urandom of={endpoint} bs=1M count={size} status=none;\
sync;\
md5sum {endpoint} | awk \'{{print $1}}\'")
return checksum

def keep_writing_data(self, volume_name, size):
node_name = self.get(volume_name)["spec"]["nodeID"]
Expand Down Expand Up @@ -327,4 +315,9 @@ def check_data_checksum(self, volume_name, checksum):
actual_checksum = self.node_exec.issue_cmd(
node_name,
f"md5sum {endpoint} | awk \'{{print $1}}\'")
assert actual_checksum == checksum
if actual_checksum != checksum:
message = f"Got {file_path} checksum = {actual_checksum} \
Expected checksum = {checksum}"
logging(message)
time.sleep(self.retry_count)
assert False, message
8 changes: 8 additions & 0 deletions e2e/libs/volume/volume.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,14 @@ def wait_for_volume_healthy(self, volume_name):
def wait_for_volume_expand_to_size(self, volume_name, size):
return self.volume.wait_for_volume_expand_to_size(volume_name, size)

def wait_for_volume_degraded(self, volume_name):
    """Wait until the volume is attached and its robustness is degraded."""
    expected_state, expected_robustness = "attached", "degraded"
    self.volume.wait_for_volume_state(volume_name, expected_state)
    self.volume.wait_for_volume_robustness(volume_name, expected_robustness)

def wait_for_volume_unknown(self, volume_name):
    """Wait until the volume is attached and its robustness is unknown."""
    expected_state, expected_robustness = "attached", "unknown"
    self.volume.wait_for_volume_state(volume_name, expected_state)
    self.volume.wait_for_volume_robustness(volume_name, expected_robustness)

def get_endpoint(self, volume_name):
return self.volume.get_endpoint(volume_name)

Expand Down
2 changes: 1 addition & 1 deletion e2e/libs/workload/constant.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
IMAGE_LITMUX = 'litmuschaos/go-runner:latest'
IMAGE_UBUNTU = 'ubuntu:16.04'

WAIT_FOR_POD_STABLE_MAX_RETRY = 60
WAIT_FOR_POD_STABLE_MAX_RETRY = 120
Loading

0 comments on commit 4e244c1

Please sign in to comment.