From 1aff6271d9b5dfe8b0b79273ab8e4ff9a483999f Mon Sep 17 00:00:00 2001 From: Chris Date: Fri, 20 Sep 2024 10:38:35 +0800 Subject: [PATCH] test(robot): add drain cases from manual test cases longhorn/longhorn-9292 Signed-off-by: Chris --- e2e/keywords/common.resource | 2 + e2e/keywords/host.resource | 5 + e2e/keywords/k8s.resource | 61 ++++++++++++- e2e/keywords/longhorn.resource | 10 ++ e2e/keywords/volume.resource | 6 ++ e2e/libs/k8s/k8s.py | 27 +++++- e2e/libs/keywords/host_keywords.py | 3 + e2e/libs/keywords/k8s_keywords.py | 30 +++++- e2e/libs/keywords/node_keywords.py | 4 + e2e/libs/node/node.py | 10 ++ e2e/libs/utility/utility.py | 4 + e2e/tests/negative/node_drain.robot | 137 ++++++++++++++++++++++++++++ 12 files changed, 294 insertions(+), 5 deletions(-) diff --git a/e2e/keywords/common.resource b/e2e/keywords/common.resource index 642aaae18f..17e2902876 100644 --- a/e2e/keywords/common.resource +++ b/e2e/keywords/common.resource @@ -19,6 +19,7 @@ Library ../libs/keywords/setting_keywords.py Library ../libs/keywords/backupstore_keywords.py Library ../libs/keywords/backup_keywords.py Library ../libs/keywords/sharemanager_keywords.py +Library ../libs/keywords/k8s_keywords.py *** Keywords *** Set test environment @@ -35,6 +36,7 @@ Set test environment END Cleanup test resources + uncordon_all_nodes cleanup_control_plane_network_latency reset_node_schedule cleanup_node_exec diff --git a/e2e/keywords/host.resource b/e2e/keywords/host.resource index c15ffe8f0d..0ad7047162 100644 --- a/e2e/keywords/host.resource +++ b/e2e/keywords/host.resource @@ -37,3 +37,8 @@ Restart cluster Power on off node Run keyword And Continue On Failure ... power_on_node_by_name ${powered_off_node} + +Power off node ${node_id} + ${powered_off_node} = get_node_by_index ${node_id} + power_off_node_by_name ${powered_off_node} + Set Test Variable ${powered_off_node} \ No newline at end of file diff --git a/e2e/keywords/k8s.resource b/e2e/keywords/k8s.resource index 75ff214d4e..24fd068b21 100644 --- a/e2e/keywords/k8s.resource +++ b/e2e/keywords/k8s.resource @@ -5,9 +5,10 @@ Library ../libs/keywords/k8s_keywords.py Library ../libs/keywords/workload_keywords.py Library ../libs/keywords/volume_keywords.py Library ../libs/keywords/host_keywords.py +Library ../libs/keywords/node_keywords.py *** Variables *** - +${DRAIN_TIMEOUT} 90 *** Keywords *** Stop volume node kubelet of ${workload_kind} ${workload_id} for ${duration} seconds @@ -51,5 +52,63 @@ Force drain volume of ${workload_kind} ${workload_id} replica node Set Test Variable ${drained_node} Set Test Variable ${last_volume_node} +Force drain node ${node_id} + ${node_name} = get_node_by_index ${node_id} + force_drain_node ${node_name} + +Drain volume of ${workload_kind} ${workload_id} volume node + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + ${volume_name} = get_workload_volume_name ${workload_name} + ${drained_node} = get_volume_node ${volume_name} + ${last_volume_node} = get_volume_node ${volume_name} + drain_node ${drained_node} + wait_for_all_pods_evicted ${drained_node} + Set Test Variable ${drained_node} + Set Test Variable ${last_volume_node} + Uncordon the drained node uncordon_node ${drained_node} + +Cordon ${workload_kind} ${workload_id} volume node + ${workload_name} = generate_name_with_suffix ${workload_kind} ${workload_id} + ${volume_name} = get_workload_volume_name ${workload_name} + ${volume_node} = get_volume_node ${volume_name} + cordon_node ${volume_node} + check_node_is_not_schedulable ${volume_node} + +Force 
drain all nodes
+    FOR    ${node_id}    IN RANGE    0    3
+        ${node_name} =    get_node_by_index    ${node_id}
+        force_drain_node    ${node_name}
+        wait_for_all_pods_evicted    ${node_name}
+    END
+
+Check node ${node_id} cordoned
+    ${node_name} =    get_node_by_index    ${node_id}
+    check_node_cordoned    ${node_name}
+
+Force drain node ${node_id} and expect failure
+    ${drained_node} =    get_node_by_index    ${node_id}
+    ${instance_manager_name} =    get_instance_manager_on_node    ${drained_node}
+    Run Keyword And Expect Error    *    force_drain_node    ${drained_node}
+    Set Test Variable    ${instance_manager_name}
+    Set Test Variable    ${drained_node}
+
+Force drain node ${node_id} and expect success
+    ${drained_node} =    get_node_by_index    ${node_id}
+    ${instance_manager_name} =    get_instance_manager_on_node    ${drained_node}
+    force_drain_node    ${drained_node}
+    Set Test Variable    ${instance_manager_name}
+    Set Test Variable    ${drained_node}
+
+The drain process not completed
+    check_drain_process_not_completed    ${drain_process}
+
+The drain process completed
+    wait_for_all_pods_evicted    ${drained_node}
+    check_drain_process_completed    ${drain_process}
+
+Check PDB not exist
+    [Arguments]    ${instance_manager}
+    check_instance_manager_pdb_not_exist    ${instance_manager}
+
diff --git a/e2e/keywords/longhorn.resource b/e2e/keywords/longhorn.resource
index 377a7e38a9..b181bec88a 100644
--- a/e2e/keywords/longhorn.resource
+++ b/e2e/keywords/longhorn.resource
@@ -3,6 +3,7 @@ Documentation    Longhorn Keywords
 
 Library    ../libs/keywords/instancemanager_keywords.py
 Library    ../libs/keywords/workload_keywords.py
+Library    ../libs/keywords/k8s_keywords.py
 
 *** Variables ***
 @{longhorn_workloads}
@@ -43,3 +44,12 @@ Check Longhorn workload pods ${condition} annotated with ${key}
     Run Keyword IF    '${condition}' == 'not'    Should Not Be True    ${is_annotated}
     ...    ELSE IF    '${condition}' == 'is'    Should Be True    ${is_annotated}
     ...    
ELSE Fail Invalid condition ${condition} + +Check instance-manager pod is not running on drained node + ${pod} = get_instance_manager_on_node ${drained_node} + Should Be Equal ${pod} ${None} + +Check instance-manager pod is running on node ${node_id} + ${node_name} = get_node_by_index ${node_id} + ${pod} = get_instance_manager_on_node ${node_name} + Should Not Be Equal ${pod} ${None} \ No newline at end of file diff --git a/e2e/keywords/volume.resource b/e2e/keywords/volume.resource index 085188f074..1887b3b1fc 100644 --- a/e2e/keywords/volume.resource +++ b/e2e/keywords/volume.resource @@ -206,6 +206,12 @@ Wait volume ${volume_id} replica on node ${node_id} stopped ${node_name} = get_node_by_index ${node_id} wait_for_replica_stopped ${volume_name} ${node_name} +Check volume ${volume_id} replica on node ${node_id} exist + ${volume_name} = generate_name_with_suffix volume ${volume_id} + ${node_name} = get_node_by_index ${node_id} + ${replica_name} get_replica_name_on_node ${volume_name} ${node_name} + Should Not Be Equal ${replica_name} ${None} + Check volume ${volume_id} data is intact ${volume_name} = generate_name_with_suffix volume ${volume_id} check_data_checksum ${volume_name} diff --git a/e2e/libs/k8s/k8s.py b/e2e/libs/k8s/k8s.py index ea854b8fa6..3002c83e5f 100644 --- a/e2e/libs/k8s/k8s.py +++ b/e2e/libs/k8s/k8s.py @@ -1,6 +1,7 @@ import time import subprocess import asyncio +import os from kubernetes import client from workload.pod import create_pod from workload.pod import delete_pod @@ -9,6 +10,8 @@ from utility.utility import subprocess_exec_cmd from utility.utility import logging from utility.utility import get_retry_count_and_interval +from utility.utility import subprocess_exec_cmd_with_timeout +from robot.libraries.BuiltIn import BuiltIn async def restart_kubelet(node_name, downtime_in_sec=10): manifest = new_pod_manifest( @@ -32,9 +35,9 @@ def drain_node(node_name): exec_cmd = ["kubectl", "drain", node_name, "--ignore-daemonsets", "--delete-emptydir-data"] res = subprocess_exec_cmd(exec_cmd) -def force_drain_node(node_name): +def force_drain_node(node_name, timeout): exec_cmd = ["kubectl", "drain", node_name, "--force", "--ignore-daemonsets", "--delete-emptydir-data"] - res = subprocess_exec_cmd(exec_cmd) + res = subprocess_exec_cmd_with_timeout(exec_cmd, timeout) def cordon_node(node_name): exec_cmd = ["kubectl", "cordon", node_name] @@ -71,3 +74,23 @@ def wait_all_pods_evicted(node_name): time.sleep(retry_interval) assert evicted, 'failed to evict pods' + +def check_node_cordoned(node_name): + api = client.CoreV1Api() + node = api.read_node(node_name) + assert node.spec.unschedulable is True, f"node {node_name} is not cordoned." 
+ +def get_instance_manager_on_node(node_name): + data_engine = BuiltIn().get_variable_value("${DATA_ENGINE}") + pods = get_all_pods_on_node(node_name) + for pod in pods: + labels = pod.metadata.labels + if labels.get("longhorn.io/data-engine") == data_engine and \ + labels.get("longhorn.io/component") == "instance-manager": + return pod.metadata.name + return None + +def check_instance_manager_pdb_not_exist(instance_manager): + exec_cmd = ["kubectl", "get", "pdb", "-n", "longhorn-system"] + res = subprocess_exec_cmd(exec_cmd) + assert instance_manager not in res.decode('utf-8') diff --git a/e2e/libs/keywords/host_keywords.py b/e2e/libs/keywords/host_keywords.py index bd4f6c7ef8..b2a4fe64c2 100644 --- a/e2e/libs/keywords/host_keywords.py +++ b/e2e/libs/keywords/host_keywords.py @@ -52,3 +52,6 @@ def power_off_volume_node(self, volume_name): def power_on_node_by_name(self, node_name): self.host.power_on_node(node_name) + + def power_off_node_by_name(self, node_name): + self.host.power_off_node(node_name) diff --git a/e2e/libs/keywords/k8s_keywords.py b/e2e/libs/keywords/k8s_keywords.py index 625696a8d3..ed5b0b7f84 100644 --- a/e2e/libs/keywords/k8s_keywords.py +++ b/e2e/libs/keywords/k8s_keywords.py @@ -5,8 +5,12 @@ from k8s.k8s import drain_node, force_drain_node from k8s.k8s import cordon_node, uncordon_node from k8s.k8s import wait_all_pods_evicted +from k8s.k8s import get_all_pods_on_node +from k8s.k8s import check_node_cordoned +from k8s.k8s import get_instance_manager_on_node +from k8s.k8s import check_instance_manager_pdb_not_exist from utility.utility import logging - +from node import Node class k8s_keywords: @@ -45,10 +49,32 @@ def drain_node(self, node_name): drain_node(node_name) def force_drain_node(self, node_name): - force_drain_node(node_name) + timeout = int(BuiltIn().get_variable_value("${DRAIN_TIMEOUT}", default="90")) + force_drain_node(node_name, timeout) def uncordon_node(self, node_name): uncordon_node(node_name) + def cordon_node(self, node_name): + cordon_node(node_name) + def wait_for_all_pods_evicted(self, node_name): wait_all_pods_evicted(node_name) + + def uncordon_all_nodes(self): + nodes = Node.list_node_names_by_role("worker") + + for node_name in nodes: + uncordon_node(node_name) + + def get_all_pods_on_node(self, node_name): + return get_all_pods_on_node(node_name) + + def check_node_cordoned(self, node_name): + check_node_cordoned(node_name) + + def get_instance_manager_on_node(self, node_name): + return get_instance_manager_on_node(node_name) + + def check_instance_manager_pdb_not_exist(self, instance_manager): + return check_instance_manager_pdb_not_exist(instance_manager) diff --git a/e2e/libs/keywords/node_keywords.py b/e2e/libs/keywords/node_keywords.py index 74ef9055bf..bd53dcfdf6 100644 --- a/e2e/libs/keywords/node_keywords.py +++ b/e2e/libs/keywords/node_keywords.py @@ -40,5 +40,9 @@ def enable_node_scheduling(self, node_name): def reset_node_schedule(self): nodes = self.node.list_node_names_by_role("worker") + for node_name in nodes: self.enable_node_scheduling(node_name) + + def check_node_is_not_schedulable(self, node_name): + self.node.check_node_schedulable(node_name, schedulable="False") diff --git a/e2e/libs/node/node.py b/e2e/libs/node/node.py index 565fe0c971..dc1491986c 100644 --- a/e2e/libs/node/node.py +++ b/e2e/libs/node/node.py @@ -10,6 +10,8 @@ from utility.utility import get_retry_count_and_interval from utility.utility import logging +from k8s.k8s import uncordon_node + class Node: DEFAULT_DISK_PATH = "/var/lib/longhorn/" @@ -159,3 
+161,11 @@ def set_default_disk_scheduling(self, node_name, allowScheduling):
             if disk.path == self.DEFAULT_DISK_PATH:
                 disk.allowScheduling = allowScheduling
         self.update_disks(node_name, node.disks)
+
+    def check_node_schedulable(self, node_name, schedulable):
+        for _ in range(self.retry_count):
+            node = get_longhorn_client().by_id_node(node_name)
+            if node["conditions"]["Schedulable"]["status"] == schedulable:
+                break
+            time.sleep(self.retry_interval)
+        assert node["conditions"]["Schedulable"]["status"] == schedulable
diff --git a/e2e/libs/utility/utility.py b/e2e/libs/utility/utility.py
index 8f3521bb6b..70c6ab3a4f 100644
--- a/e2e/libs/utility/utility.py
+++ b/e2e/libs/utility/utility.py
@@ -86,6 +86,10 @@ def subprocess_exec_cmd(cmd):
     logging(f"Executed command {cmd} with result {res}")
     return res
 
+def subprocess_exec_cmd_with_timeout(cmd, timeout):
+    res = subprocess.check_output(cmd, timeout=timeout)
+    logging(f"Executed command {cmd} with timeout {timeout}s, result {res}")
+    return res
 
 def wait_for_cluster_ready():
     core_api = client.CoreV1Api()
diff --git a/e2e/tests/negative/node_drain.robot b/e2e/tests/negative/node_drain.robot
index e220ceb845..a179742425 100644
--- a/e2e/tests/negative/node_drain.robot
+++ b/e2e/tests/negative/node_drain.robot
@@ -10,6 +10,10 @@ Resource    ../keywords/k8s.resource
 Resource    ../keywords/deployment.resource
 Resource    ../keywords/workload.resource
 Resource    ../keywords/setting.resource
+Resource    ../keywords/longhorn.resource
+Resource    ../keywords/volume.resource
+Resource    ../keywords/host.resource
+Resource    ../keywords/node.resource
 
 Test Setup    Set test environment
 Test Teardown    Cleanup test resources
@@ -89,3 +93,136 @@ Force Drain Replica Node While Replica Rebuilding
         And Wait for deployment 1 pods stable
         And Check deployment 1 data in file data.txt is intact
     END
+
+Drain node with force
+    [Documentation]    Drain node with force
+    ...    1. Deploy a cluster that contains 3 worker nodes N1, N2, N3.
+    ...    2. Deploy Longhorn.
+    ...    3. Create a 1-replica deployment with a 3-replica Longhorn volume. The volume is attached to N1.
+    ...    4. Write some data to the volume and get the md5sum.
+    ...    5. Force drain and remove N2, which contains one replica only.
+    ...    kubectl drain --delete-emptydir-data=true --force=true --grace-period=-1 --ignore-daemonsets=true --timeout=
+    ...    6. Wait for the volume to become Degraded.
+    ...    7. Force drain and remove N1, which is the node the volume is attached to.
+    ...    kubectl drain --delete-emptydir-data=true --force=true --grace-period=-1 --ignore-daemonsets=true --timeout=
+    ...
+    ...    8. Wait for the volume to detach and then recover. It will be reattached to the workload/node.
+    ...    9. Verify the instance manager pods are gone and not recreated after the drain.
+    ...    10. Validate the volume content. The data is intact.
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Wait for volume of deployment 0 healthy
+    And Write 2048 MB data to file data.txt in deployment 0
+
+    And Force drain volume of deployment 0 replica node
+    Then Wait for volume of deployment 0 attached to the original node and degraded
+
+    And Force drain volume of deployment 0 volume node
+    And Wait for volume of deployment 0 attached to another node and degraded
+    And Check instance-manager pod is not running on drained node
+    Then Check deployment 0 data in file data.txt is intact
+
+Drain node without force
+    [Documentation]    Drain node without force
+    ...    1. Cordon the node. Longhorn will automatically disable the node scheduling when a Kubernetes node is cordoned.
+    ...    2. Evict all the replicas from the node.
+    ...    3. Run the following command to drain the node with the force flag set to false.
+    ...    kubectl drain --delete-emptydir-data --force=false --grace-period=-1 --ignore-daemonsets=true --timeout=
+    ...    4. Observe that the workloads move to another node. The volumes should first detach and then attach to the workloads once they move to another node.
+    ...    5. One by one, all the pods should get evicted.
+    ...    6. Verify the instance manager pods are gone and not recreated after the drain.
+    Given Create storageclass longhorn-test with    dataEngine=${DATA_ENGINE}
+    And Create persistentvolumeclaim 0 using RWO volume with longhorn-test storageclass
+    And Create deployment 0 with persistentvolumeclaim 0
+    And Wait for volume of deployment 0 healthy
+    And Write 2048 MB data to file data.txt in deployment 0
+
+    When Cordon deployment 0 volume node
+    And Delete replica of deployment 0 volume on volume node
+    And Drain volume of deployment 0 volume node
+    And Wait for volume of deployment 0 attached to another node and degraded
+    And Check instance-manager pod is not running on drained node
+    Then Check deployment 0 data in file data.txt is intact
+
+Test kubectl drain nodes when PVC/PV/LHV are created through Longhorn API
+    [Documentation]    Test kubectl drain nodes when PVC/PV/LHV are created through Longhorn API
+    ...    Given 1 PVC/PV/LHV created through the Longhorn API, and the LHV is not yet attached/replicated.
+    ...    When kubectl drain nodes.
+    ...    NODE=centos-worker-0
+    ...    kubectl cordon ${NODE}
+    ...    kubectl drain --force --ignore-daemonsets --delete-emptydir-data --grace-period=10 ${NODE}
+    ...    Then all nodes should drain successfully.
+    When Create volume 0 with    dataEngine=${DATA_ENGINE}
+    And Wait for volume 0 detached
+    And Create persistentvolume for volume 0
+    And Create persistentvolumeclaim for volume 0
+    And Force drain all nodes
+
+Stopped replicas on deleted nodes should not be counted as healthy replicas when draining nodes
+    [Documentation]    Stopped replicas on deleted nodes should not be counted as healthy replicas when draining nodes
+    ...    When draining a node, the node will be set as unschedulable and all pods should be evicted.
+    ...    By Longhorn’s default settings, a replica will only be evicted if there is another healthy replica on a running node.
+    ...    Related Issue:
+    ...    - https://github.com/longhorn/longhorn/issues/2237
+    ...
+    ...    Given Longhorn with a 2-node cluster: Node_1, Node_2
+    ...    And Update the Node Drain Policy setting to block-if-contains-last-replica
+    ...    And Create a 5Gi detached volume with 2 replicas.
+    ...    And Stop Node_1, which contains one of the replicas.
+    ...    When Attempting to drain Node_2, which contains the remaining replica.
+    ...    kubectl drain --delete-emptydir-data=true --force=true --grace-period=-1 --ignore-daemonsets=true
+    ...    Then Node_2 becomes cordoned.
+    ...    And All pods on Node_2 are evicted except the replica instance manager pod.
+    ...    kubectl get pods --field-selector spec.nodeName= -o wide -n longhorn-system
+    ...    And The last healthy replica still exists on Node_2.
+    [Teardown]    Cleanup test resources include off nodes
+    Given Disable node 0 scheduling
+    And Set setting node-drain-policy to block-if-contains-last-replica
+    And Create volume 0 with    size=5Gi    numberOfReplicas=2    dataEngine=${DATA_ENGINE}
+    And Attach volume 0 to node 1
+    And Wait for volume 0 healthy
+    And Write data to volume 0
+    And Detach volume 0 from attached node
+    And Wait for volume 0 detached
+    And Power off node 1
+
+    When Force drain node 2 and expect failure
+    And Check instance-manager pod is running on node 2
+    And Check volume 0 replica on node 2 exist
+
+Setting Allow Node Drain with the Last Healthy Replica protects the last healthy replica with Pod Disruption Budget (PDB)
+    [Documentation]    Setting Allow Node Drain with the Last Healthy Replica protects the last healthy replica with Pod Disruption Budget (PDB)
+    ...    Related Issue:
+    ...    - https://github.com/longhorn/longhorn/issues/2237
+    ...
+    ...    Given Longhorn with a 2-node cluster: Node_1, Node_2
+    ...    And Update the Node Drain Policy setting to block-if-contains-last-replica and confirm with the following command:
+    ...    kubectl get settings.longhorn.io/node-drain-policy -n longhorn-system
+    ...    And Create a 5Gi detached volume with 2 replicas.
+    ...    And Stop Node_1, which contains one of the replicas.
+    ...    And Drain Node_2 so that all pods on Node_2 are evicted, but the replica instance manager pod is still on Node_2 because it is protected by the PDB.
+    ...    kubectl drain --delete-emptydir-data=true --force=true --grace-period=-1 --ignore-daemonsets=true
+    ...    And Update the Node Drain Policy setting to always-allow and confirm with the following command:
+    ...    kubectl get settings.longhorn.io/node-drain-policy -n longhorn-system
+    ...    Then The pod longhorn-system/instance-manager-r-xxxxxxxx is evicted successfully, and the following command can be used to verify that only daemonset pods such as engine-image, longhorn-csi-plugin and longhorn-manager are running on Node_2:
+    ...    kubectl get pods --field-selector spec.nodeName= -o wide -n longhorn-system
+    ...    And The PDB is deleted and can be verified with the following command:
+    ...    kubectl get pdb -n longhorn-system
+    [Teardown]    Cleanup test resources include off nodes
+    Given Disable node 0 scheduling
+    And Set setting node-drain-policy to block-if-contains-last-replica
+    And Create volume 0 with    size=5Gi    numberOfReplicas=2    dataEngine=${DATA_ENGINE}
+    And Attach volume 0 to node 1
+    And Wait for volume 0 healthy
+    And Write data to volume 0
+    And Detach volume 0 from attached node
+    And Wait for volume 0 detached
+    And Power off node 1
+
+    When Force drain node 2 and expect failure
+    And Check instance-manager pod is running on node 2
+
+    When Set setting node-drain-policy to always-allow
+    And Force drain node 2 and expect success
+    And Check PDB not exist    instance_manager=${instance_manager_name}
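
Note on the drain timeout used by these keywords: force_drain_node() shells out to kubectl drain through subprocess_exec_cmd_with_timeout(), so a drain that stays blocked by the instance-manager PodDisruptionBudget surfaces as subprocess.TimeoutExpired once ${DRAIN_TIMEOUT} (90 s) elapses, and that is the error `Force drain node ... and expect failure` catches with Run Keyword And Expect Error. A minimal standalone sketch of that behavior, assuming only kubectl on PATH and cluster access; the node name below is hypothetical:

import subprocess

def force_drain_node(node_name, timeout):
    # Same command the patch issues; check_output() raises TimeoutExpired when
    # the drain is still blocked (e.g. by the instance-manager PDB) at timeout,
    # and CalledProcessError when kubectl itself exits non-zero.
    cmd = ["kubectl", "drain", node_name, "--force",
           "--ignore-daemonsets", "--delete-emptydir-data"]
    return subprocess.check_output(cmd, timeout=timeout)

try:
    force_drain_node("worker-node-2", timeout=90)   # hypothetical node name
    print("drain completed")
except subprocess.TimeoutExpired:
    print("drain blocked past the timeout")         # the expected-failure path
except subprocess.CalledProcessError as err:
    print(f"kubectl drain failed with exit code {err.returncode}")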