diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 7bb5e5a3..9e6c74e3 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -139,7 +139,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        test: ["cli", "cli_change_lb", "cli_change_keys", "cli_change_ns_visibility", "state", "multi_gateway", "server", "grpc", "omap_lock", "log_files", "nsid", "psk", "dhchap"]
+        test: ["cli", "cli_change_lb", "cli_change_keys", "cli_change_ns_visibility", "state", "multi_gateway", "server", "grpc", "omap_lock", "log_files", "nsid", "psk", "dhchap", "subsys_grp_name_append"]
     runs-on: ubuntu-latest
     env:
       HUGEPAGES: 512 # for multi gateway test, approx 256 per gateway instance
diff --git a/control/grpc.py b/control/grpc.py
index 3498dea5..62341c82 100644
--- a/control/grpc.py
+++ b/control/grpc.py
@@ -1174,9 +1174,7 @@ def create_subsystem_safe(self, request, context):
                                      error_message=errmsg,
                                      nqn=request.subsystem_nqn)
 
-        if not request.max_namespaces:
-            request.max_namespaces = self.max_namespaces_per_subsystem
-        else:
+        if request.max_namespaces:
             if request.max_namespaces > self.max_namespaces:
                 self.logger.warning(f"The requested max number of namespaces for subsystem "
                                     f"{request.subsystem_nqn} ({request.max_namespaces}) is "
@@ -1232,31 +1230,35 @@ def create_subsystem_safe(self, request, context):
-        if context:
-            if request.no_group_append or not self.gateway_group:
-                self.logger.info("Subsystem NQN will not be changed")
-            else:
-                group_name_to_use = self.gateway_group.replace(GatewayState.OMAP_KEY_DELIMITER,
-                                                               "-")
-                request.subsystem_nqn += f".{group_name_to_use}"
-                self.logger.info(f"Subsystem NQN was changed to {request.subsystem_nqn}, "
-                                 f"adding the group name")
-
         # Set client ID range according to group id assigned by the monitor
         offset = self.group_id * CNTLID_RANGE_SIZE
         min_cntlid = offset + 1
         max_cntlid = offset + CNTLID_RANGE_SIZE
 
-        if not request.serial_number:
-            random.seed()
-            randser = random.randint(2, 99999999999999)
-            request.serial_number = f"Ceph{randser}"
-            self.logger.info(f"No serial number specified for {request.subsystem_nqn}, will "
-                             f"use {request.serial_number}")
-
         ret = False
         omap_lock = self.omap_lock.get_omap_lock_to_use(context)
         with omap_lock:
+            if not request.max_namespaces:
+                request.max_namespaces = self.max_namespaces_per_subsystem
+
+            if not request.serial_number:
+                random.seed()
+                randser = random.randint(2, 99999999999999)
+                request.serial_number = f"Ceph{randser}"
+                self.logger.info(f"No serial number specified for {request.subsystem_nqn}, will "
+                                 f"use {request.serial_number}")
+
+            if context:
+
+                if request.no_group_append or not self.gateway_group:
+                    self.logger.info("Subsystem NQN will not be changed")
+                else:
+                    group_name_to_use = self.gateway_group.replace(
+                        GatewayState.OMAP_KEY_DELIMITER, "-")
+                    request.subsystem_nqn += f".{group_name_to_use}"
+                    request.no_group_append = True
+                    self.logger.info(f"Subsystem NQN was changed to {request.subsystem_nqn}, "
+                                     f"adding the group name")
             errmsg = ""
             try:
                 subsys_using_serial = None
@@ -2274,7 +2276,7 @@ def list_namespaces(self, request, context=None):
         """List namespaces."""
 
         peer_msg = self.get_peer_message(context)
-        if request.nsid is None or request.nsid == 0:
+        if not request.nsid:
             if request.uuid:
                 nsid_msg = f"namespace with UUID {request.uuid}"
             else:
@@ -2567,28 +2569,28 @@ def namespace_set_qos_limits_safe(self, request, context):
                              f"limits are set for namespace {request.nsid} on "
" f"{request.subsystem_nqn}") - # Merge current limits with previous ones, if exist - if ns_qos_entry: - if not request.HasField("rw_ios_per_second") and ns_qos_entry.get( - "rw_ios_per_second") is not None: - request.rw_ios_per_second = int(ns_qos_entry["rw_ios_per_second"]) - if not request.HasField("rw_mbytes_per_second") and ns_qos_entry.get( - "rw_mbytes_per_second") is not None: - request.rw_mbytes_per_second = int(ns_qos_entry["rw_mbytes_per_second"]) - if not request.HasField("r_mbytes_per_second") and ns_qos_entry.get( - "r_mbytes_per_second") is not None: - request.r_mbytes_per_second = int(ns_qos_entry["r_mbytes_per_second"]) - if not request.HasField("w_mbytes_per_second") and ns_qos_entry.get( - "w_mbytes_per_second") is not None: - request.w_mbytes_per_second = int(ns_qos_entry["w_mbytes_per_second"]) - - limits_to_set = self.get_qos_limits_string(request) - self.logger.debug(f"After merging current QOS limits with previous ones for " - f"namespace {request.nsid} on {request.subsystem_nqn}," - f"{limits_to_set}") - omap_lock = self.omap_lock.get_omap_lock_to_use(context) with omap_lock: + # Merge current limits with previous ones, if exist + if ns_qos_entry: + assert context, "Shouldn't get here on an update" + if not request.HasField("rw_ios_per_second") and ns_qos_entry.get( + "rw_ios_per_second") is not None: + request.rw_ios_per_second = int(ns_qos_entry["rw_ios_per_second"]) + if not request.HasField("rw_mbytes_per_second") and ns_qos_entry.get( + "rw_mbytes_per_second") is not None: + request.rw_mbytes_per_second = int(ns_qos_entry["rw_mbytes_per_second"]) + if not request.HasField("r_mbytes_per_second") and ns_qos_entry.get( + "r_mbytes_per_second") is not None: + request.r_mbytes_per_second = int(ns_qos_entry["r_mbytes_per_second"]) + if not request.HasField("w_mbytes_per_second") and ns_qos_entry.get( + "w_mbytes_per_second") is not None: + request.w_mbytes_per_second = int(ns_qos_entry["w_mbytes_per_second"]) + + limits_to_set = self.get_qos_limits_string(request) + self.logger.debug(f"After merging current QOS limits with previous ones for " + f"namespace {request.nsid} on {request.subsystem_nqn}," + f"{limits_to_set}") try: ret = rpc_bdev.bdev_set_qos_limit( self.spdk_rpc_client, diff --git a/control/state.py b/control/state.py index 8b51f00f..80a12afc 100644 --- a/control/state.py +++ b/control/state.py @@ -1048,7 +1048,7 @@ def update(self) -> bool: local_version = self.omap.get_local_version() self.logger.info(f"Check local version {local_version} against OMAP version " - f"{omap_version}") + f"{omap_version} ({self.id_text}).") if local_version < omap_version: self.logger.info(f"Start update from {local_version} to {omap_version} " f"({self.id_text}).") diff --git a/tests/test_subsys_grp_name_append.py b/tests/test_subsys_grp_name_append.py new file mode 100755 index 00000000..ccbc3753 --- /dev/null +++ b/tests/test_subsys_grp_name_append.py @@ -0,0 +1,87 @@ +import pytest +from control.server import GatewayServer +from control.cli import main as cli +from control.cephutils import CephUtils +import grpc +from control.proto import gateway_pb2_grpc as pb2_grpc +import copy + +image = "mytestdevimage" +pool = "rbd" +subsystem_prefix = "nqn.2016-06.io.spdk:cnode" +config = "ceph-nvmeof.conf" +group_name = "group1" + + +@pytest.fixture(scope="module") +def two_gateways(config): + """Sets up and tears down two Gateways""" + nameA = "GatewayAA" + nameB = "GatewayBB" + sockA = f"spdk_{nameA}.sock" + sockB = f"spdk_{nameB}.sock" + 
config.config["gateway-logs"]["log_level"] = "debug" + config.config["gateway"]["group"] = group_name + config.config["gateway"]["rebalance_period_sec"] = "0" + config.config["gateway"]["state_update_interval_sec"] = "360" + config.config["gateway"]["state_update_notify"] = "False" + addr = config.get("gateway", "addr") + configA = copy.deepcopy(config) + configB = copy.deepcopy(config) + configA.config["gateway"]["name"] = nameA + configA.config["gateway"]["override_hostname"] = nameA + configA.config["spdk"]["rpc_socket_name"] = sockA + configA.config["spdk"]["tgt_cmd_extra_args"] = "-m 0x03" + portA = configA.getint("gateway", "port") + configB.config["gateway"]["name"] = nameB + configB.config["gateway"]["override_hostname"] = nameB + configB.config["spdk"]["rpc_socket_name"] = sockB + portB = portA + 2 + discPortB = configB.getint("discovery", "port") + 1 + configB.config["gateway"]["port"] = str(portB) + configB.config["discovery"]["port"] = str(discPortB) + configB.config["spdk"]["tgt_cmd_extra_args"] = "-m 0x0C" + + ceph_utils = CephUtils(config) + with (GatewayServer(configA) as gatewayA, GatewayServer(configB) as gatewayB): + ceph_utils.execute_ceph_monitor_command( + "{" + f'"prefix":"nvme-gw create", "id": "{nameA}", "pool": "{pool}", ' + f'"group": "{group_name}"' + "}" + ) + ceph_utils.execute_ceph_monitor_command( + "{" + f'"prefix":"nvme-gw create", "id": "{nameB}", "pool": "{pool}", ' + f'"group": "{group_name}"' + "}" + ) + gatewayA.serve() + gatewayB.serve() + + channelA = grpc.insecure_channel(f"{addr}:{portA}") + stubA = pb2_grpc.GatewayStub(channelA) + channelB = grpc.insecure_channel(f"{addr}:{portB}") + stubB = pb2_grpc.GatewayStub(channelB) + + yield gatewayA, stubA, gatewayB, stubB + gatewayA.gateway_rpc.gateway_state.delete_state() + gatewayB.gateway_rpc.gateway_state.delete_state() + gatewayA.server.stop(grace=1) + gatewayB.server.stop(grace=1) + + +def test_create_subsystems(caplog, two_gateways): + gatewayA, stubA, gatewayB, stubB = two_gateways + for i in range(20): + caplog.clear() + subsystem = f"{subsystem_prefix}{i}" + cli(["subsystem", "add", "--subsystem", subsystem]) + subsystem += f".{group_name}" + assert f"Adding subsystem {subsystem}: Successful" in caplog.text + caplog.clear() + subsystem = f"{subsystem_prefix}X" + cli(["--server-port", "5502", "subsystem", "add", "--subsystem", subsystem]) + subsystem += f".{group_name}" + assert "differs from OMAP file version" in caplog.text + assert "The file is not current, will reload it and try again" in caplog.text + assert f"Adding subsystem {subsystem}: Successful" in caplog.text + caplog.clear() + cli(["--format", "json", "subsystem", "list"]) + assert f".{group_name}.{{group_name}}" not in caplog.text