From 279b26098e8fdce5cef272e72171fe1fed851656 Mon Sep 17 00:00:00 2001 From: Gil Bregman Date: Tue, 7 May 2024 11:45:43 +0300 Subject: [PATCH] Fail namespace creation if passed load balancing group doesn't exist Fixes #627 Signed-off-by: Gil Bregman --- control/cephutils.py | 2 + control/grpc.py | 21 ++- tests/test_cli.py | 334 +++++++++++++++++++++++------------- tests/test_grpc.py | 9 +- tests/test_log_files.py | 2 + tests/test_multi_gateway.py | 8 +- tests/test_namespaces.py | 2 + tests/test_nsid.py | 11 +- tests/test_old_omap.py | 3 + tests/test_omap_lock.py | 16 +- tests/test_server.py | 6 + 11 files changed, 270 insertions(+), 144 deletions(-) diff --git a/control/cephutils.py b/control/cephutils.py index 4ba3a7915..000722c96 100644 --- a/control/cephutils.py +++ b/control/cephutils.py @@ -26,8 +26,10 @@ def __init__(self, config): self.last_sent = time.time() def execute_ceph_monitor_command(self, cmd): + self.logger.debug(f"Execute monitor command: {cmd}") with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster: rply = cluster.mon_command(cmd, b'') + self.logger.debug(f"Monitor reply: {rply}") return rply def get_number_created_gateways(self, pool, group): diff --git a/control/grpc.py b/control/grpc.py index f100d9e54..8d0815db6 100644 --- a/control/grpc.py +++ b/control/grpc.py @@ -529,7 +529,7 @@ def create_subsystem_safe(self, request, context): peer_msg = self.get_peer_message(context) self.logger.info( - f"Received request to create subsystem {request.subsystem_nqn}, enable_ha: {request.enable_ha}, context: {context}{peer_msg}") + f"Received request to create subsystem {request.subsystem_nqn}, enable_ha: {request.enable_ha}, max_namespaces: {request.max_namespaces}, context: {context}{peer_msg}") if not request.enable_ha: errmsg = f"{create_subsystem_error_prefix}: HA must be enabled for subsystems" @@ -592,7 +592,7 @@ def create_subsystem_safe(self, request, context): ana_reporting = enable_ha, ) self.subsys_ha[request.subsystem_nqn] = enable_ha - self.subsys_max_ns[request.subsystem_nqn] = request.max_namespaces if request.max_namespaces is not None else 32 + self.subsys_max_ns[request.subsystem_nqn] = request.max_namespaces if request.max_namespaces else 32 self.logger.debug(f"create_subsystem {request.subsystem_nqn}: {ret}") except Exception as ex: self.logger.exception(create_subsystem_error_prefix) @@ -957,8 +957,17 @@ def namespace_add_safe(self, request, context): if not context: create_image = False else: # new namespace - if request.anagrpid == 0: + # If an explicit load balancing group was passed, make sure it exists + if request.anagrpid != 0: + grps_list = self.ceph_utils.get_number_created_gateways(self.gateway_pool, self.gateway_group) + if request.anagrpid not in grps_list: + self.logger.debug(f"ANA groups: {grps_list}") + errmsg = f"Failure adding namespace {nsid_msg}to {request.subsystem_nqn}: Load balancing group {request.anagrpid} doesn't exist" + self.logger.error(errmsg) + return pb2.req_status(status=errno.ENODEV, error_message=errmsg) + else: anagrp = self.choose_anagrpid_for_namespace(request.nsid) + assert anagrp != 0 # if anagrp == 0: # errmsg = f"Failure adding namespace with automatic ana group load balancing {nsid_msg} to {request.subsystem_nqn}" # self.logger.error(errmsg) @@ -1036,6 +1045,12 @@ def namespace_change_load_balancing_group_safe(self, request, context): self.logger.info(f"Received request to change load balancing group for namespace {nsid_msg}in {request.subsystem_nqn} to {request.anagrpid}, context: 
{context}{peer_msg}") with self.omap_lock(context=context): + grps_list = self.ceph_utils.get_number_created_gateways(self.gateway_pool, self.gateway_group) + if request.anagrpid not in grps_list: + self.logger.debug(f"ANA groups: {grps_list}") + errmsg = f"Failure changing load balancing group for namespace {nsid_msg}in {request.subsystem_nqn}: Load balancing group {request.anagrpid} doesn't exist" + self.logger.error(errmsg) + return pb2.req_status(status=errno.ENODEV, error_message=errmsg) find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, request.uuid, False, f"Failure changing load balancing group for namespace {nsid_msg}in {request.subsystem_nqn}") if not find_ret[0]: diff --git a/tests/test_cli.py b/tests/test_cli.py index 1cda727f1..c12bfae1e 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -8,21 +8,23 @@ from control.proto import gateway_pb2 as pb2 from control.proto import gateway_pb2_grpc as pb2_grpc import os +import copy image = "mytestdevimage" image2 = "mytestdevimage2" image3 = "mytestdevimage3" +image4 = "mytestdevimage4" pool = "rbd" subsystem = "nqn.2016-06.io.spdk:cnode1" subsystem2 = "nqn.2016-06.io.spdk:cnode2" discovery_nqn = "nqn.2014-08.org.nvmexpress.discovery" serial = "Ceph00000000000001" uuid = "948878ee-c3b2-4d58-a29b-2cff713fc02d" +uuid2 = "948878ee-c3b2-4d58-a29b-2cff713fc02e" host_list = ["nqn.2016-06.io.spdk:host1", "*"] nsid = "1" -nsid_ipv6 = "3" -anagrpid = "2" -anagrpid2 = "4" +anagrpid = "1" +anagrpid2 = "2" host_name = socket.gethostname() addr = "127.0.0.1" addr_ipv6 = "::1" @@ -51,6 +53,7 @@ def gateway(config): gateway.gw_logger_object.set_log_level("debug") gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gateway.name}", "pool": "{pool}", "group": ""' + "}") # Bind the client and Gateway channel = grpc.insecure_channel(f"{addr}:{port}") @@ -61,17 +64,68 @@ def gateway(config): gateway.server.stop(grace=1) gateway.gateway_rpc.gateway_state.delete_state() +@pytest.fixture(scope="module") +def two_gateways(config): + """Sets up and tears down two Gateways""" + nameA = "GatewayAA" + nameB = "GatewayBB" + sockA = "spdk_GatewayAA.sock" + sockB = "spdk_GatewayBB.sock" + config.config["gateway-logs"]["log_level"] = "debug" + addr = config.get("gateway", "addr") + configA = copy.deepcopy(config) + configB = copy.deepcopy(config) + configA.config["gateway"]["name"] = nameA + configA.config["gateway"]["override_hostname"] = nameA + configA.config["spdk"]["rpc_socket_name"] = sockA + portA = configA.getint("gateway", "port") + 1 + configA.config["gateway"]["port"] = str(portA) + discPortA = configA.getint("discovery", "port") + 1 + configA.config["discovery"]["port"] = str(discPortA) + configA.config["spdk"]["tgt_cmd_extra_args"] = "--disable-cpumask-locks" + configB.config["gateway"]["name"] = nameB + configB.config["gateway"]["override_hostname"] = nameB + configB.config["spdk"]["rpc_socket_name"] = sockB + portB = portA + 2 + discPortB = discPortA + 1 + configB.config["gateway"]["port"] = str(portB) + discPort = configB.getint("discovery", "port") + 1 + configB.config["discovery"]["port"] = str(discPortB) + configB.config["spdk"]["tgt_cmd_extra_args"] = "--disable-cpumask-locks" + + with (GatewayServer(configA) as gatewayA, GatewayServer(configB) as gatewayB): + gatewayA.set_group_id(0) + gatewayB.set_group_id(1) + gatewayA.serve() + gatewayA.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{nameA}", 
"pool": "{pool}", "group": ""' + "}") + gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{nameB}", "pool": "{pool}", "group": ""' + "}") + + channelA = grpc.insecure_channel(f"{addr}:{portA}") + stubA = pb2_grpc.GatewayStub(channelA) + channelB = grpc.insecure_channel(f"{addr}:{portB}") + stubB = pb2_grpc.GatewayStub(channelB) + + yield gatewayA.gateway_rpc, stubA, gatewayB.gateway_rpc, stubB + gatewayA.gateway_rpc.gateway_state.delete_state() + gatewayB.gateway_rpc.gateway_state.delete_state() + gatewayA.server.stop(grace=1) + gatewayB.server.stop(grace=1) + class TestGet: + @pytest.mark.order1 def test_get_subsystems(self, caplog, gateway): caplog.clear() cli(["subsystem", "list"]) assert "No subsystems" in caplog.text + @pytest.mark.order2 def test_get_subsystems_ipv6(self, caplog, gateway): caplog.clear() cli(["--server-address", server_addr_ipv6, "subsystem", "list"]) assert "No subsystems" in caplog.text + @pytest.mark.order3 def test_get_gateway_info(self, caplog, gateway): gw, stub = gateway caplog.clear() @@ -128,6 +182,7 @@ def test_get_gateway_info(self, caplog, gateway): assert gw_info.bool_status == True class TestCreate: + @pytest.mark.order4 def test_create_subsystem(self, caplog, gateway): caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2016"]) @@ -220,6 +275,7 @@ def test_create_subsystem(self, caplog, gateway): assert subs_list.subsystems[0].nqn == subsystem assert subs_list.subsystems[1].nqn == subsystem2 + @pytest.mark.order5 def test_create_subsystem_with_discovery_nqn(self, caplog, gateway): caplog.clear() rc = 0 @@ -231,6 +287,14 @@ def test_create_subsystem_with_discovery_nqn(self, caplog, gateway): assert "Can't add a discovery subsystem" in caplog.text assert rc == 2 + @pytest.mark.order6 + def test_add_namespace_wrong_balancing_group(self, caplog, gateway): + caplog.clear() + cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image4, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", "100", "--force"]) + assert f"Failure adding namespace to {subsystem}:" in caplog.text + assert f"Load balancing group 100 doesn't exist" in caplog.text + + @pytest.mark.order7 def test_add_namespace_wrong_block_size(self, caplog, gateway): gw, stub = gateway caplog.clear() @@ -241,22 +305,25 @@ def test_add_namespace_wrong_block_size(self, caplog, gateway): assert f"Failure adding namespace" in caplog.text assert f"block size can't be zero" in caplog.text - def test_add_namespace(self, caplog, gateway): + @pytest.mark.order8 + def test_add_namespace(self, caplog, two_gateways): + gwA, stubA, gwB, stubB = two_gateways caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", "junk", "--rbd-image", image2, "--uuid", uuid, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", "1"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", "junk", "--rbd-image", image2, "--uuid", uuid, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", anagrpid]) assert f"RBD pool junk doesn't exist" in caplog.text caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--uuid", uuid, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", "1", "--force"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--uuid", uuid, "--size", "16MB", "--rbd-create-image", 
"--load-balancing-group", anagrpid, "--force"]) assert f"Adding namespace 1 to {subsystem}: Successful" in caplog.text - assert "Allocated cluster name='cluster_context_1_0'" in caplog.text + assert f"Allocated cluster name='cluster_context_{anagrpid}_0'" in caplog.text + assert f"get_cluster cluster_name='cluster_context_{anagrpid}_0'" in caplog.text caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--size", "36M", "--rbd-create-image", "--load-balancing-group", "1", "--force"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--size", "36M", "--rbd-create-image", "--load-balancing-group", anagrpid, "--force"]) assert f"Image {pool}/{image2} already exists with a size of 16777216 bytes which differs from the requested size of 37748736 bytes" in caplog.text assert f"Can't create RBD image {pool}/{image2}" in caplog.text caplog.clear() rc = 0 try: - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "16MB", "--load-balancing-group", "1"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "16MB", "--load-balancing-group", anagrpid]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -265,7 +332,7 @@ def test_add_namespace(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size=-16MB", "--rbd-create-image", "--load-balancing-group", "1"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size=-16MB", "--rbd-create-image", "--load-balancing-group", anagrpid]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -274,7 +341,7 @@ def test_add_namespace(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "1x6MB", "--load-balancing-group", "1", "--rbd-create-image"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "1x6MB", "--load-balancing-group", anagrpid, "--rbd-create-image"]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -283,7 +350,7 @@ def test_add_namespace(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "16MiB", "--load-balancing-group", "1", "--rbd-create-image"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "16MiB", "--load-balancing-group", anagrpid, "--rbd-create-image"]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -292,96 +359,100 @@ def test_add_namespace(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", "16mB", "--load-balancing-group", "1", "--rbd-create-image"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--block-size", "1024", "--size", 
"16mB", "--load-balancing-group", anagrpid, "--rbd-create-image"]) except SystemExit as sysex: rc = int(str(sysex)) pass assert "must be numeric" in caplog.text assert rc == 2 caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--block-size", "1024", "--load-balancing-group", "1"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--block-size", "1024", "--load-balancing-group", anagrpid, "--force"]) assert f"Adding namespace 2 to {subsystem}: Successful" in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "1"]) - assert '"load_balancing_group": 1' in caplog.text + cli(["--server-port", "5501", "--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) + assert f'"load_balancing_group": {anagrpid}' in caplog.text assert '"block_size": 512' in caplog.text assert f'"uuid": "{uuid}"' in caplog.text assert '"rw_ios_per_second": "0"' in caplog.text assert '"rw_mbytes_per_second": "0"' in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "2"]) - assert '"load_balancing_group": 1' in caplog.text + cli(["--server-port", "5501", "--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "2"]) + assert f'"load_balancing_group": {anagrpid}' in caplog.text assert '"block_size": 1024' in caplog.text assert f'"uuid": "{uuid}"' not in caplog.text assert '"rw_ios_per_second": "0"' in caplog.text assert '"rw_mbytes_per_second": "0"' in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid]) + cli(["--server-port", "5501", "--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid]) assert f'"uuid": "{uuid}"' in caplog.text caplog.clear() - cli(["namespace", "change_load_balancing_group", "--subsystem", subsystem, "--nsid", nsid, "--load-balancing-group", anagrpid2]) + cli(["--server-port", "5501", "namespace", "change_load_balancing_group", "--subsystem", subsystem, "--nsid", nsid, "--load-balancing-group", "10"]) + assert f"Failure changing load balancing group for namespace using NSID {nsid}" in caplog.text + assert f"Load balancing group 10 doesn't exist" in caplog.text + caplog.clear() + cli(["--server-port", "5501", "namespace", "change_load_balancing_group", "--subsystem", subsystem, "--nsid", nsid, "--load-balancing-group", anagrpid2]) assert f"Changing load balancing group of namespace {nsid} in {subsystem} to {anagrpid2}: Successful" in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) + cli(["--server-port", "5501", "--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) assert f'"load_balancing_group": {anagrpid2}' in caplog.text caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image3, "--size", "4GB", "--rbd-create-image", "--load-balancing-group", "1"]) + cli(["--server-port", "5501", "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image3, "--size", "4GB", "--rbd-create-image", "--load-balancing-group", anagrpid]) assert f"Adding namespace 3 to {subsystem}: Successful" in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "3"]) + cli(["--server-port", "5501", "--format", "json", 
"namespace", "list", "--subsystem", subsystem, "--nsid", "3"]) assert '"rbd_image_size": "4294967296"' in caplog.text + assert f'"load_balancing_group": {anagrpid}' in caplog.text + @pytest.mark.order9 def test_add_namespace_ipv6(self, caplog, gateway): caplog.clear() - cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", "1","--force"]) + cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--nsid", "4", "--force"]) assert f"Adding namespace 4 to {subsystem}: Successful" in caplog.text - assert f'will continue as the "force" argument was used' in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "3"]) - assert '"load_balancing_group": 1' in caplog.text - cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--nsid", "8", "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", "1", "--force"]) - assert f"Adding namespace 8 to {subsystem}: Successful" in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "4"]) + assert f'"load_balancing_group": {anagrpid}' in caplog.text + cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--nsid", "5", "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--force"]) + assert f"Adding namespace 5 to {subsystem}: Successful" in caplog.text + assert f'will continue as the "force" argument was used' in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "8"]) - assert '"load_balancing_group": 1' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "5"]) + assert f'"load_balancing_group": {anagrpid}' in caplog.text + @pytest.mark.order10 def test_add_namespace_same_image(self, caplog, gateway): caplog.clear() img_name = f"{image}_test" - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--size", "16MB", "--load-balancing-group", "1", "--rbd-create-image", "--load-balancing-group", "1"]) - assert f"Adding namespace 5 to {subsystem}: Successful" in caplog.text - caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--size", "16MB", "--load-balancing-group", "1", "--rbd-create-image", "--load-balancing-group", "1"]) - assert f"RBD image {pool}/{img_name} is already used by a namespace" in caplog.text - assert f"you can find the offending namespace by using" in caplog.text + cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--size", "16MB", "--load-balancing-group", anagrpid, "--rbd-create-image", "--nsid", "6", "--uuid", uuid2]) + assert f"Adding namespace 6 to {subsystem}: Successful" in caplog.text caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--load-balancing-group", "1"]) + cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--size", "16MB", "--load-balancing-group", anagrpid, "--rbd-create-image", "--nsid", "7"]) assert f"RBD image {pool}/{img_name} is already used by a namespace" in caplog.text assert f"you can find the offending namespace by using" in caplog.text 
caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--load-balancing-group", "1", "--force"]) - assert f"Adding namespace 6 to {subsystem}: Successful" in caplog.text + cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--load-balancing-group", anagrpid, "--force", "--nsid", "7"]) + assert f"Adding namespace 7 to {subsystem}: Successful" in caplog.text assert f"RBD image {pool}/{img_name} is already used by a namespace" in caplog.text assert f'will continue as the "force" argument was used' in caplog.text + @pytest.mark.order11 def test_resize_namespace(self, caplog, gateway): gw, stub = gateway caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) + assert f'"nsid": 6' in caplog.text assert '"block_size": 512' in caplog.text assert '"rbd_image_size": "16777216"' in caplog.text - assert f'"uuid": "{uuid}"' in caplog.text + assert f'"uuid": "{uuid2}"' in caplog.text caplog.clear() - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size", "2MB"]) + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "2MB"]) assert f"new size 2097152 bytes is smaller than current size 16777216 bytes" in caplog.text caplog.clear() - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size", "32MB"]) - assert f"Resizing namespace {nsid} in {subsystem} to 32 MiB: Successful" in caplog.text + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "32MB"]) + assert f"Resizing namespace 6 in {subsystem} to 32 MiB: Successful" in caplog.text caplog.clear() rc = 0 try: - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size", "32mB"]) + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "32mB"]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -390,7 +461,7 @@ def test_resize_namespace(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size=-32MB"]) + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size=-32MB"]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -399,42 +470,44 @@ def test_resize_namespace(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size", "3x2GB"]) + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "3x2GB"]) except SystemExit as sysex: rc = int(str(sysex)) pass assert "must be numeric" in caplog.text assert rc == 2 caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) + assert f'"nsid": 6' in caplog.text assert '"block_size": 512' in caplog.text assert '"rbd_image_size": "33554432"' in caplog.text - assert f'"uuid": "{uuid}"' in caplog.text + assert f'"uuid": "{uuid2}"' in caplog.text + assert '"nsid": 1' not in caplog.text assert '"nsid": 2' not in caplog.text assert '"nsid": 3' not in caplog.text assert '"nsid": 4' not in caplog.text - assert '"nsid": 8' not in caplog.text + assert '"nsid": 5' not in caplog.text caplog.clear() - cli(["namespace", "resize", 
"--subsystem", subsystem, "--uuid", uuid, "--size", "64MB"]) - assert f"Resizing namespace with UUID {uuid} in {subsystem} to 64 MiB: Successful" in caplog.text + cli(["namespace", "resize", "--subsystem", subsystem, "--uuid", uuid2, "--size", "64MB"]) + assert f"Resizing namespace with UUID {uuid2} in {subsystem} to 64 MiB: Successful" in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid]) - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid2]) + assert f'"nsid": 6' in caplog.text assert '"block_size": 512' in caplog.text assert '"rbd_image_size": "67108864"' in caplog.text - assert f'"uuid": "{uuid}"' in caplog.text + assert f'"uuid": "{uuid2}"' in caplog.text + assert '"nsid": 1' not in caplog.text assert '"nsid": 2' not in caplog.text assert '"nsid": 3' not in caplog.text assert '"nsid": 4' not in caplog.text - assert '"nsid": 8' not in caplog.text + assert '"nsid": 5' not in caplog.text caplog.clear() cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "12", "--uuid", uuid, "--size", "128MB"]) assert f"Failure resizing namespace using NSID 12 and UUID {uuid} on {subsystem}: Can't find namespace" in caplog.text caplog.clear() - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size", "32MB"]) - assert f"Failure resizing namespace using NSID {nsid} on {subsystem}: new size 33554432 bytes is smaller than current size 67108864 bytes" in caplog.text - ns = cli_test(["namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "32MB"]) + assert f"Failure resizing namespace using NSID 6 on {subsystem}: new size 33554432 bytes is smaller than current size 67108864 bytes" in caplog.text + ns = cli_test(["namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) assert ns != None assert ns.status == 0 assert len(ns.namespaces) == 1 @@ -442,54 +515,56 @@ def test_resize_namespace(self, caplog, gateway): rc = rpc_bdev.bdev_rbd_delete(gw.spdk_rpc_client, name=ns.namespaces[0].bdev_name) assert rc caplog.clear() - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", nsid, "--size", "128MB"]) - assert f"Failure resizing namespace using NSID {nsid} on {subsystem}: Can't find namespace" in caplog.text + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "128MB"]) + assert f"Failure resizing namespace using NSID 6 on {subsystem}: Can't find namespace" in caplog.text caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--nsid", nsid, "--rbd-pool", pool, "--rbd-image", image, "--uuid", uuid, "--force", "--load-balancing-group", "1"]) - assert f"Adding namespace 1 to {subsystem}: Successful" in caplog.text + cli(["namespace", "add", "--subsystem", subsystem, "--nsid", "6", "--rbd-pool", pool, "--rbd-image", image, "--uuid", uuid2, "--force", "--load-balancing-group", anagrpid, "--force"]) + assert f"Adding namespace 6 to {subsystem}: Successful" in caplog.text caplog.clear() - cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "3", "--size", "6GB"]) - assert f"Resizing namespace 3 in {subsystem} to 6144 MiB: Successful" in caplog.text + cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "4", "--size", "6GB"]) + assert f"Resizing namespace 4 in {subsystem} to 6144 MiB: Successful" in caplog.text + @pytest.mark.order12 def test_set_namespace_qos_limits(self, caplog, 
gateway): caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) + assert f'"nsid": 6' in caplog.text assert '"rw_ios_per_second": "0"' in caplog.text assert '"rw_mbytes_per_second": "0"' in caplog.text assert '"r_mbytes_per_second": "0"' in caplog.text assert '"w_mbytes_per_second": "0"' in caplog.text caplog.clear() - cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", nsid, "--rw-ios-per-second", "2000"]) - assert f"Setting QOS limits of namespace {nsid} in {subsystem}: Successful" in caplog.text - assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 1 on {subsystem}" in caplog.text + cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6", "--rw-ios-per-second", "2000"]) + assert f"Setting QOS limits of namespace 6 in {subsystem}: Successful" in caplog.text + assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 6 on {subsystem}" in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) + assert f'"nsid": 6' in caplog.text + assert f'"uuid": "{uuid2}"' in caplog.text assert '"rw_ios_per_second": "2000"' in caplog.text assert '"rw_mbytes_per_second": "0"' in caplog.text assert '"r_mbytes_per_second": "0"' in caplog.text assert '"w_mbytes_per_second": "0"' in caplog.text caplog.clear() - cli(["namespace", "set_qos", "--subsystem", subsystem, "--uuid", uuid, "--rw-megabytes-per-second", "30"]) - assert f"Setting QOS limits of namespace with UUID {uuid} in {subsystem}: Successful" in caplog.text - assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 1 on {subsystem}" not in caplog.text + cli(["namespace", "set_qos", "--subsystem", subsystem, "--uuid", uuid2, "--rw-megabytes-per-second", "30"]) + assert f"Setting QOS limits of namespace with UUID {uuid2} in {subsystem}: Successful" in caplog.text + assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 6 on {subsystem}" not in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid]) - assert f'"uuid": "{uuid}"' in caplog.text - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid2]) + assert f'"uuid": "{uuid2}"' in caplog.text + assert f'"nsid": 6' in caplog.text assert '"rw_ios_per_second": "2000"' in caplog.text assert '"rw_mbytes_per_second": "30"' in caplog.text assert '"r_mbytes_per_second": "0"' in caplog.text assert '"w_mbytes_per_second": "0"' in caplog.text caplog.clear() - cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", nsid, + cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6", "--r-megabytes-per-second", "15", "--w-megabytes-per-second", "25"]) - assert f"Setting QOS limits of namespace {nsid} in {subsystem}: Successful" in caplog.text - assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 1 on {subsystem}" not in caplog.text + assert f"Setting QOS limits of namespace 6 in {subsystem}: Successful" in 
caplog.text + assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 6 on {subsystem}" not in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"nsid": {nsid}' in caplog.text + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) + assert f'"nsid": 6' in caplog.text assert '"rw_ios_per_second": "2000"' in caplog.text assert '"rw_mbytes_per_second": "30"' in caplog.text assert '"r_mbytes_per_second": "15"' in caplog.text @@ -497,7 +572,7 @@ def test_set_namespace_qos_limits(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", nsid]) + cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6"]) except SystemExit as sysex: rc = int(str(sysex)) pass @@ -506,37 +581,38 @@ def test_set_namespace_qos_limits(self, caplog, gateway): caplog.clear() rc = 0 try: - cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", nsid, "--w-megabytes-per-second", "JUNK"]) + cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6", "--w-megabytes-per-second", "JUNK"]) except SystemExit as sysex: rc = int(str(sysex)) pass assert "error: argument --w-megabytes-per-second: invalid int value: 'JUNK'" in caplog.text assert rc == 2 + @pytest.mark.order13 def test_namespace_io_stats(self, caplog, gateway): caplog.clear() - cli(["namespace", "get_io_stats", "--subsystem", subsystem, "--nsid", nsid]) - assert f'IO statistics for namespace {nsid} in {subsystem}' in caplog.text + cli(["namespace", "get_io_stats", "--subsystem", subsystem, "--nsid", "6"]) + assert f'IO statistics for namespace 6 in {subsystem}' in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "get_io_stats", "--subsystem", subsystem, "--nsid", nsid]) + cli(["--format", "json", "namespace", "get_io_stats", "--subsystem", subsystem, "--nsid", "6"]) assert f'"status": 0' in caplog.text assert f'"subsystem_nqn": "{subsystem}"' in caplog.text - assert f'"nsid": {nsid}' in caplog.text - assert f'"uuid": "{uuid}"' in caplog.text + assert f'"nsid": 6' in caplog.text + assert f'"uuid": "{uuid2}"' in caplog.text assert f'"ticks":' in caplog.text assert f'"bytes_written":' in caplog.text assert f'"bytes_read":' in caplog.text assert f'"max_write_latency_ticks":' in caplog.text assert f'"io_error":' in caplog.text caplog.clear() - cli(["namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid]) - assert f'IO statistics for namespace with UUID {uuid} in {subsystem}' in caplog.text + cli(["namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid2]) + assert f'IO statistics for namespace with UUID {uuid2} in {subsystem}' in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid]) + cli(["--format", "json", "namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid2]) assert f'"status": 0' in caplog.text assert f'"subsystem_nqn": "{subsystem}"' in caplog.text - assert f'"nsid": {nsid}' in caplog.text - assert f'"uuid": "{uuid}"' in caplog.text + assert f'"nsid": 6' in caplog.text + assert f'"uuid": "{uuid2}"' in caplog.text assert f'"ticks":' in caplog.text assert f'"bytes_written":' in caplog.text assert f'"bytes_read":' in caplog.text @@ -553,6 +629,7 @@ def test_namespace_io_stats(self, caplog, gateway): assert rc == 2 @pytest.mark.parametrize("host", host_list) + @pytest.mark.order14 def 
test_add_host(self, caplog, host): caplog.clear() rc = 0 @@ -570,6 +647,7 @@ def test_add_host(self, caplog, host): else: assert f"Adding host {host} to {subsystem}: Successful" in caplog.text + @pytest.mark.order15 def test_add_host_invalid_nqn(self, caplog): caplog.clear() cli(["host", "add", "--subsystem", subsystem, "--host", "nqn.2016"]) @@ -587,6 +665,7 @@ def test_add_host_invalid_nqn(self, caplog): assert f"contains invalid characters" in caplog.text @pytest.mark.parametrize("listener", listener_list) + @pytest.mark.order16 def test_create_listener(self, caplog, listener, gateway): caplog.clear() cli(["listener", "add", "--subsystem", subsystem, "--host-name", host_name] + listener) @@ -596,6 +675,7 @@ def test_create_listener(self, caplog, listener, gateway): @pytest.mark.parametrize("listener_ipv6", listener_list_ipv6) + @pytest.mark.order17 def test_create_listener_ipv6(self, caplog, listener_ipv6, gateway): caplog.clear() cli(["--server-address", server_addr_ipv6, "listener", "add", "--subsystem", subsystem, "--host-name", host_name] + listener_ipv6) @@ -604,6 +684,7 @@ def test_create_listener_ipv6(self, caplog, listener_ipv6, gateway): assert f"Adding {subsystem} listener at [{listener_ipv6[1]}]:{listener_ipv6[3]}: Successful" in caplog.text @pytest.mark.parametrize("listener", listener_list_no_port) + @pytest.mark.order18 def test_create_listener_no_port(self, caplog, listener, gateway): caplog.clear() cli(["listener", "add", "--subsystem", subsystem, "--host-name", host_name] + listener) @@ -613,6 +694,7 @@ def test_create_listener_no_port(self, caplog, listener, gateway): @pytest.mark.parametrize("listener", listener_list) @pytest.mark.parametrize("listener_ipv6", listener_list_ipv6) + @pytest.mark.order19 def test_list_listeners(self, caplog, listener, listener_ipv6, gateway): caplog.clear() cli(["--format", "json", "listener", "list", "--subsystem", subsystem]) @@ -625,6 +707,7 @@ def test_list_listeners(self, caplog, listener, listener_ipv6, gateway): assert f'"adrfam": "ipv6"' in caplog.text @pytest.mark.parametrize("listener", listener_list_negative_port) + @pytest.mark.order20 def test_create_listener_negative_port(self, caplog, listener, gateway): caplog.clear() rc = 0 @@ -637,6 +720,7 @@ def test_create_listener_negative_port(self, caplog, listener, gateway): assert rc == 2 @pytest.mark.parametrize("listener", listener_list_big_port) + @pytest.mark.order21 def test_create_listener_port_too_big(self, caplog, listener, gateway): caplog.clear() rc = 0 @@ -649,12 +733,14 @@ def test_create_listener_port_too_big(self, caplog, listener, gateway): assert rc == 2 @pytest.mark.parametrize("listener", listener_list_wrong_host) + @pytest.mark.order22 def test_create_listener_wrong_hostname(self, caplog, listener, gateway): caplog.clear() cli(["listener", "add", "--subsystem", subsystem] + listener) assert f"Gateway's host name must match current host ({host_name})" in caplog.text @pytest.mark.parametrize("listener", listener_list_invalid_adrfam) + @pytest.mark.order23 def test_create_listener_invalid_adrfam(self, caplog, listener, gateway): caplog.clear() rc = 0 @@ -667,6 +753,7 @@ def test_create_listener_invalid_adrfam(self, caplog, listener, gateway): assert rc == 2 @pytest.mark.parametrize("listener", listener_list_discovery) + @pytest.mark.order24 def test_create_listener_on_discovery(self, caplog, listener, gateway): caplog.clear() cli(["listener", "add", "--host-name", host_name] + listener) @@ -674,6 +761,7 @@ def test_create_listener_on_discovery(self, caplog, 
listener, gateway): class TestDelete: @pytest.mark.parametrize("host", host_list) + @pytest.mark.order25 def test_remove_host(self, caplog, host, gateway): caplog.clear() rc = 0 @@ -692,6 +780,7 @@ def test_remove_host(self, caplog, host, gateway): assert f"Removing host {host} access from {subsystem}: Successful" in caplog.text @pytest.mark.parametrize("listener", listener_list) + @pytest.mark.order26 def test_delete_listener_using_wild_hostname_no_force(self, caplog, listener, gateway): caplog.clear() rc = 0 @@ -704,18 +793,21 @@ def test_delete_listener_using_wild_hostname_no_force(self, caplog, listener, ga assert rc == 2 @pytest.mark.parametrize("listener", listener_list) + @pytest.mark.order27 def test_delete_listener(self, caplog, listener, gateway): caplog.clear() cli(["listener", "del", "--force", "--subsystem", subsystem, "--host-name", host_name] + listener) assert f"Deleting listener {listener[1]}:{listener[3]} from {subsystem} for host {host_name}: Successful" in caplog.text @pytest.mark.parametrize("listener_ipv6", listener_list_ipv6) + @pytest.mark.order28 def test_delete_listener_ipv6(self, caplog, listener_ipv6, gateway): caplog.clear() cli(["--server-address", server_addr_ipv6, "listener", "del", "--subsystem", subsystem, "--host-name", host_name] + listener_ipv6) assert f"Deleting listener [{listener_ipv6[1]}]:{listener_ipv6[3]} from {subsystem} for host {host_name}: Successful" in caplog.text @pytest.mark.parametrize("listener", listener_list_no_port) + @pytest.mark.order29 def test_delete_listener_no_port(self, caplog, listener, gateway): caplog.clear() rc = 0 @@ -731,6 +823,7 @@ def test_delete_listener_no_port(self, caplog, listener, gateway): assert f"Deleting listener {listener[1]}:4420 from {subsystem} for host {host_name}: Successful" in caplog.text @pytest.mark.parametrize("listener", listener_list) + @pytest.mark.order30 def test_delete_listener_using_wild_hostname(self, caplog, listener, gateway): caplog.clear() cli(["listener", "add", "--subsystem", subsystem, "--host-name", host_name] + listener) @@ -748,10 +841,11 @@ def test_delete_listener_using_wild_hostname(self, caplog, listener, gateway): cli(["--format", "json", "listener", "list", "--subsystem", subsystem]) assert f'"trsvcid": {listener[3]}' not in caplog.text + @pytest.mark.order31 def test_remove_namespace(self, caplog, gateway): gw, stub = gateway caplog.clear() - ns_list = cli_test(["namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) + ns_list = cli_test(["namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) assert ns_list != None assert ns_list.status == 0 assert len(ns_list.namespaces) == 1 @@ -772,8 +866,8 @@ def test_remove_namespace(self, caplog, gateway): ret = stub.namespace_delete(del_ns_req) assert "At least one of NSID or UUID should be specified for finding a namesapce" in caplog.text caplog.clear() - cli(["namespace", "del", "--subsystem", subsystem, "--nsid", nsid]) - assert f"Deleting namespace {nsid} from {subsystem}: Successful" in caplog.text + cli(["namespace", "del", "--subsystem", subsystem, "--nsid", "6"]) + assert f"Deleting namespace 6 from {subsystem}: Successful" in caplog.text assert f"Error removing namespace's QOS limits, they might not have been set" not in caplog.text bdev_found = False bdev_list = rpc_bdev.bdev_get_bdevs(gw.spdk_rpc_client) @@ -790,9 +884,10 @@ def test_remove_namespace(self, caplog, gateway): assert f"Deleting namespace 2 from {subsystem}: Successful" in caplog.text assert f"Error removing namespace's QOS limits, they might 
not have been set" in caplog.text caplog.clear() - cli(["namespace", "del", "--subsystem", subsystem, "--nsid", nsid_ipv6]) - assert f"Deleting namespace {nsid_ipv6} from {subsystem}: Successful" in caplog.text + cli(["namespace", "del", "--subsystem", subsystem, "--nsid", "4"]) + assert f"Deleting namespace 4 from {subsystem}: Successful" in caplog.text + @pytest.mark.order32 def test_delete_subsystem(self, caplog, gateway): caplog.clear() cli(["subsystem", "del", "--subsystem", subsystem]) @@ -807,6 +902,7 @@ def test_delete_subsystem(self, caplog, gateway): cli(["subsystem", "list"]) assert "No subsystems" in caplog.text + @pytest.mark.order33 def test_delete_subsystem_with_discovery_nqn(self, caplog, gateway): caplog.clear() rc = 0 @@ -819,6 +915,7 @@ def test_delete_subsystem_with_discovery_nqn(self, caplog, gateway): assert rc == 2 class TestCreateWithAna: + @pytest.mark.order34 def test_create_subsystem_ana(self, caplog, gateway): caplog.clear() cli(["subsystem", "list"]) @@ -831,28 +928,18 @@ def test_create_subsystem_ana(self, caplog, gateway): assert serial not in caplog.text assert subsystem in caplog.text + @pytest.mark.order35 def test_add_namespace_ana(self, caplog, gateway): caplog.clear() - cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid]) - assert f"Adding namespace {nsid} to {subsystem}: Successful" in caplog.text - assert "Allocated cluster name='cluster_context_2_0'" in caplog.text - caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"load_balancing_group": {anagrpid}' in caplog.text - - def test_change_namespace_lb_group(self, caplog, gateway): + cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--force", "--nsid", "10"]) + assert f"Adding namespace 10 to {subsystem}: Successful" in caplog.text + assert f"get_cluster cluster_name='cluster_context_{anagrpid}_0'" in caplog.text caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) + cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "10"]) assert f'"load_balancing_group": {anagrpid}' in caplog.text - caplog.clear() - cli(["namespace", "change_load_balancing_group", "--subsystem", subsystem, "--nsid", nsid, "--load-balancing-group", anagrpid2]) - assert f"Changing load balancing group of namespace {nsid} in {subsystem} to {anagrpid2}: Successful" in caplog.text - caplog.clear() - cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", nsid]) - assert f'"load_balancing_group": {anagrpid2}' in caplog.text - assert f'"load_balancing_group": {anagrpid}' not in caplog.text @pytest.mark.parametrize("listener", listener_list) + @pytest.mark.order36 def test_create_listener_ana(self, caplog, listener, gateway): caplog.clear() cli(["listener", "add", "--subsystem", subsystem, "--host-name", host_name] + listener) @@ -863,16 +950,19 @@ def test_create_listener_ana(self, caplog, listener, gateway): class TestDeleteAna: @pytest.mark.parametrize("listener", listener_list) + @pytest.mark.order37 def test_delete_listener_ana(self, caplog, listener, gateway): caplog.clear() cli(["listener", "del", "--subsystem", subsystem, "--host-name", host_name] + listener) assert f"Deleting listener {listener[1]}:{listener[3]} from {subsystem} for host {host_name}: Successful" in caplog.text + @pytest.mark.order38 def 
test_remove_namespace_ana(self, caplog, gateway): caplog.clear() - cli(["namespace", "del", "--subsystem", subsystem, "--nsid", nsid]) - assert f"Deleting namespace {nsid} from {subsystem}: Successful" in caplog.text + cli(["namespace", "del", "--subsystem", subsystem, "--nsid", "10"]) + assert f"Deleting namespace 10 from {subsystem}: Successful" in caplog.text + @pytest.mark.order39 def test_delete_subsystem_ana(self, caplog, gateway): caplog.clear() cli(["subsystem", "del", "--subsystem", subsystem]) @@ -882,6 +972,7 @@ def test_delete_subsystem_ana(self, caplog, gateway): assert "No subsystems" in caplog.text class TestGwLogLevel: + @pytest.mark.order40 def test_gw_log_level(self, caplog, gateway): caplog.clear() cli(["gw", "get_log_level"]) @@ -918,6 +1009,7 @@ def test_gw_log_level(self, caplog, gateway): assert 'Gateway log level is "debug"' in caplog.text class TestSPDKLOg: + @pytest.mark.order41 def test_log_flags(self, caplog, gateway): caplog.clear() cli(["spdk_log_level", "get"]) diff --git a/tests/test_grpc.py b/tests/test_grpc.py index 7190ebdea..5b04a68ca 100644 --- a/tests/test_grpc.py +++ b/tests/test_grpc.py @@ -33,6 +33,7 @@ def test_create_get_subsys(caplog, config): with GatewayServer(config) as gateway: gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gateway.name}", "pool": "{pool}", "group": ""' + "}") for i in range(created_resource_count): create_resource_by_index(i) @@ -47,11 +48,6 @@ def test_create_get_subsys(caplog, config): gateway.gateway_rpc.host_name, "--traddr", "127.0.0.1", "--trsvcid", "5001"]) assert f"Adding {subsystem_prefix}0 listener at 127.0.0.1:5001: Successful" in caplog.text - # Change ANA group id for the first namesapce - cli(["namespace", "change_load_balancing_group", "--subsystem", f"{subsystem_prefix}0", "--nsid", "1", - "--load-balancing-group", "4"]) - assert f"Changing load balancing group of namespace 1 in {subsystem_prefix}0 to 4: Successful" in caplog.text - # Set QOS for the first namespace cli(["namespace", "set_qos", "--subsystem", f"{subsystem_prefix}0", "--nsid", "1", "--rw-ios-per-second", "2000"]) @@ -69,6 +65,7 @@ def test_create_get_subsys(caplog, config): with GatewayServer(config) as gateway: gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gateway.name}", "pool": "{pool}", "group": ""' + "}") for i in range(subsys_list_count): cli(["--format", "plain", "subsystem", "list"]) @@ -77,7 +74,7 @@ def test_create_get_subsys(caplog, config): time.sleep(0.1) time.sleep(20) # Make sure update() is over - assert f"{subsystem_prefix}0 with ANA group id 4" in caplog.text + assert f"{subsystem_prefix}0 with ANA group id 1" in caplog.text assert f"Received request to set QOS limits for namespace using NSID 1 on {subsystem_prefix}0, R/W IOs per second: 2000 Read megabytes per second: 5" in caplog.text caplog.clear() cli(["--format", "plain", "subsystem", "list"]) diff --git a/tests/test_log_files.py b/tests/test_log_files.py index 006336054..972ccf4eb 100644 --- a/tests/test_log_files.py +++ b/tests/test_log_files.py @@ -13,6 +13,7 @@ config = "ceph-nvmeof.conf" subsystem_prefix = "nqn.2016-06.io.spdk:cnode" +pool = "rbd" def clear_log_files(): files = os.listdir("/var/log/ceph") @@ -49,6 +50,7 @@ def gateway(config, request): # Start gateway gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": 
"{gateway.name}", "pool": "{pool}", "group": ""' + "}") # Bind the client and Gateway channel = grpc.insecure_channel(f"{addr}:{port}") diff --git a/tests/test_multi_gateway.py b/tests/test_multi_gateway.py index a19363510..c6369f846 100644 --- a/tests/test_multi_gateway.py +++ b/tests/test_multi_gateway.py @@ -15,6 +15,7 @@ def conn(config): """Sets up and tears down Gateways A and B.""" # Setup GatewayA and GatewayB configs + pool = config.get("ceph", "pool") configA = copy.deepcopy(config) configA.config["gateway"]["name"] = "GatewayA" configA.config["gateway"]["group"] = "Group1" @@ -27,8 +28,7 @@ def conn(config): portB = portA + 2 configB.config["gateway"]["name"] = "GatewayB" configB.config["gateway"]["port"] = str(portB) - configB.config["gateway"]["state_update_interval_sec"] = str( - update_interval_sec) + configB.config["gateway"]["state_update_interval_sec"] = str(update_interval_sec) configB.config["spdk"]["rpc_socket_name"] = "spdk_GatewayB.sock" configB.config["spdk"]["tgt_cmd_extra_args"] = "-m 0x02" @@ -39,11 +39,13 @@ def conn(config): ): gatewayA.set_group_id(0) gatewayA.serve() + gatewayA.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayA.name}", "pool": "{pool}", "group": "Group1"' + "}") # Delete existing OMAP state gatewayA.gateway_rpc.gateway_state.delete_state() # Create new gatewayB.set_group_id(1) gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayB.name}", "pool": "{pool}", "group": "Group1"' + "}") # Bind the client and Gateways A & B channelA = grpc.insecure_channel(f"{addr}:{portA}") @@ -73,7 +75,7 @@ def test_multi_gateway_coordination(config, image, conn): pool = config.get("ceph", "pool") # Send requests to create a subsystem with one namespace to GatewayA - subsystem_req = pb2.create_subsystem_req(subsystem_nqn=nqn, + subsystem_req = pb2.create_subsystem_req(subsystem_nqn=nqn, max_namespaces=256, serial_number=serial, enable_ha=True) namespace_req = pb2.namespace_add_req(subsystem_nqn=nqn, rbd_pool_name=pool, diff --git a/tests/test_namespaces.py b/tests/test_namespaces.py index 6a7dcc58e..6f362b82e 100644 --- a/tests/test_namespaces.py +++ b/tests/test_namespaces.py @@ -46,11 +46,13 @@ def conn(config): ): gatewayA.set_group_id(0) gatewayA.serve() + gatewayA.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayA.name}", "pool": "{pool}", "group": "Group1"' + "}") # Delete existing OMAP state gatewayA.gateway_rpc.gateway_state.delete_state() # Create new gatewayB.set_group_id(1) gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayB.name}", "pool": "{pool}", "group": "Group1"' + "}") # Bind the client and Gateways A & B channelA = grpc.insecure_channel(f"{addr}:{portA}") diff --git a/tests/test_nsid.py b/tests/test_nsid.py index 01f78dcf8..4eea52567 100644 --- a/tests/test_nsid.py +++ b/tests/test_nsid.py @@ -38,14 +38,16 @@ def setup_config(config, gw1_name, gw2_name, gw_group, update_notify, update_int return configA, configB -def start_servers(gatewayA, gatewayB, addr, portA, portB): +def start_servers(gatewayA, gatewayB, gw_group, addr, portA, portB): gatewayA.set_group_id(0) gatewayA.serve() + gatewayA.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayA.name}", "pool": "{pool}", "group": "{gw_group}"' + "}") # Delete existing OMAP state gatewayA.gateway_rpc.gateway_state.delete_state() # Create new 
gatewayB.set_group_id(1) gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayB.name}", "pool": "{pool}", "group": "{gw_group}"' + "}") gatewayB.gateway_rpc.gateway_state.delete_state() # Bind the client and Gateways A & B @@ -70,18 +72,18 @@ def test_multi_gateway_namespace_ids(config, image, caplog): GatewayServer(configA) as gatewayA, GatewayServer(configB) as gatewayB, ): - stubA, stubB = start_servers(gatewayA, gatewayB, addr, portA, portB) + stubA, stubB = start_servers(gatewayA, gatewayB, "Group1", addr, portA, portB) # Send requests to create a subsystem on GatewayA caplog.clear() subsystem = f"{subsystem_prefix}PPP" - subsystem_add_req = pb2.create_subsystem_req(subsystem_nqn=subsystem) + subsystem_add_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, max_namespaces=256) ret_subsystem = stubA.create_subsystem(subsystem_add_req) assert ret_subsystem.status != 0 assert "HA must be enabled for subsystems" in caplog.text caplog.clear() subsystem = f"{subsystem_prefix}WWW" - subsystem_add_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, enable_ha=True) + subsystem_add_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, max_namespaces=256, enable_ha=True) ret_subsystem = stubA.create_subsystem(subsystem_add_req) assert ret_subsystem.status == 0 assert f"create_subsystem {subsystem}: True" in caplog.text @@ -152,6 +154,7 @@ def test_multi_gateway_namespace_ids(config, image, caplog): gatewayB = GatewayServer(configB) gatewayB.set_group_id(1) gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayB.name}", "pool": "{pool}", "group": "Group1"' + "}") channelB = grpc.insecure_channel(f"{addr}:{portB}") stubB = pb2_grpc.GatewayStub(channelB) time.sleep(10) diff --git a/tests/test_old_omap.py b/tests/test_old_omap.py index e55f7ac9f..647a725f3 100644 --- a/tests/test_old_omap.py +++ b/tests/test_old_omap.py @@ -3,10 +3,13 @@ import grpc from control.proto import gateway_pb2_grpc as pb2_grpc +pool = "rbd" + def test_old_omap(caplog, config): with GatewayServer(config) as gateway: gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gateway.name}", "pool": "{pool}", "group": ""' + "}") gateway.gateway_rpc.gateway_state.omap._add_key("bdev_dummy", "dummy") caplog.clear() diff --git a/tests/test_omap_lock.py b/tests/test_omap_lock.py index 3bc15804e..16ac70795 100644 --- a/tests/test_omap_lock.py +++ b/tests/test_omap_lock.py @@ -41,14 +41,16 @@ def setup_config(config, gw1_name, gw2_name, gw_group, update_notify ,update_int return configA, configB -def start_servers(gatewayA, gatewayB, addr, portA, portB): +def start_servers(gatewayA, gatewayB, gw_group, addr, portA, portB): gatewayA.set_group_id(0) gatewayA.serve() + gatewayA.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayA.name}", "pool": "{pool}", "group": "{gw_group}"' + "}") # Delete existing OMAP state gatewayA.gateway_rpc.gateway_state.delete_state() # Create new gatewayB.set_group_id(1) gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayB.name}", "pool": "{pool}", "group": "{gw_group}"' + "}") gatewayB.gateway_rpc.gateway_state.delete_state() # Bind the client and Gateways A & B @@ -81,7 +83,7 @@ def conn_omap_reread(config, request): GatewayServer(configA) as gatewayA, GatewayServer(configB) as gatewayB, ): 
- stubA, stubB = start_servers(gatewayA, gatewayB, addr, portA, portB) + stubA, stubB = start_servers(gatewayA, gatewayB, "Group1", addr, portA, portB) yield stubA, stubB, gatewayA.gateway_rpc, gatewayB.gateway_rpc stop_servers(gatewayA, gatewayB) @@ -100,7 +102,7 @@ def conn_lock_twice(config, request): GatewayServer(configA) as gatewayA, GatewayServer(configB) as gatewayB, ): - stubA, stubB = start_servers(gatewayA, gatewayB, addr, portA, portB) + stubA, stubB = start_servers(gatewayA, gatewayB, "Group2", addr, portA, portB) yield stubA, stubB stop_servers(gatewayA, gatewayB) @@ -124,7 +126,7 @@ def conn_concurrent(config, request): GatewayServer(configA) as gatewayA, GatewayServer(configB) as gatewayB, ): - stubA, stubB = start_servers(gatewayA, gatewayB, addr, portA, portB) + stubA, stubB = start_servers(gatewayA, gatewayB, "Group3", addr, portA, portB) yield gatewayA.gateway_rpc, gatewayB.gateway_rpc, stubA, stubB stop_servers(gatewayA, gatewayB) @@ -135,7 +137,7 @@ def build_host_nqn(i): def create_resource_by_index(stub, i, caplog): subsystem = f"{subsystem_prefix}{i}" - subsystem_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, enable_ha=True) + subsystem_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, max_namespaces=256, enable_ha=True) ret_subsystem = stub.create_subsystem(subsystem_req) assert ret_subsystem.status == 0 if caplog != None: @@ -187,7 +189,7 @@ def test_multi_gateway_omap_reread(config, conn_omap_reread, caplog): num_subsystems = 2 # Send requests to create a subsystem with one namespace to GatewayA - subsystem_req = pb2.create_subsystem_req(subsystem_nqn=nqn, serial_number=serial, enable_ha=True) + subsystem_req = pb2.create_subsystem_req(subsystem_nqn=nqn, serial_number=serial, max_namespaces=256, enable_ha=True) namespace_req = pb2.namespace_add_req(subsystem_nqn=nqn, nsid=nsid, rbd_pool_name=pool, rbd_image_name=image, block_size=4096, create_image=True, size=16*1024*1024, force=True) @@ -315,7 +317,7 @@ def test_multi_gateway_listener_update(config, image, conn_concurrent, caplog): caplog.clear() subsystem = f"{subsystem_prefix}QQQ" - subsystem_add_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, enable_ha=True) + subsystem_add_req = pb2.create_subsystem_req(subsystem_nqn=subsystem, max_namespaces=256, enable_ha=True) ret_subsystem = stubA.create_subsystem(subsystem_add_req) assert ret_subsystem.status == 0 assert f"create_subsystem {subsystem}: True" in caplog.text diff --git a/tests/test_server.py b/tests/test_server.py index e10cdb47d..88e82def0 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -6,6 +6,8 @@ import unittest from control.server import GatewayServer +pool = "rbd" + class TestServer(unittest.TestCase): @pytest.fixture(autouse=True) def _config(self, config): @@ -36,6 +38,7 @@ def test_spdk_exception(self): with GatewayServer(config_spdk_exception) as gateway: gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gateway.name}", "pool": "{pool}", "group": ""' + "}") self.validate_exception(cm.exception) def test_spdk_abort(self): @@ -43,6 +46,7 @@ def test_spdk_abort(self): with GatewayServer(copy.deepcopy(self.config)) as gateway: gateway.set_group_id(0) gateway.serve() + gateway.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gateway.name}", "pool": "{pool}", "group": ""' + "}") time.sleep(10) # exited context, spdk process should be aborted here by __exit__() time.sleep(10) # let it dump @@ -68,8 
+72,10 @@ def test_spdk_multi_gateway_exception(self): ): gatewayA.set_group_id(0) gatewayA.serve() + gatewayA.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayA.name}", "pool": "{pool}", "group": ""' + "}") gatewayB.set_group_id(1) gatewayB.serve() + gatewayB.ceph_utils.execute_ceph_monitor_command("{" + f'"prefix":"nvme-gw create", "id": "{gatewayB.name}", "pool": "{pool}", "group": ""' + "}") self.validate_exception(cm.exception) if __name__ == '__main__':
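Notes on the techniques used in this patch (illustrative sketches, not part of the diff):

The core behavioral change in control/grpc.py is a pre-flight check of the requested load balancing (ANA) group against the groups the monitor already knows about, both when adding a namespace and when changing its group. Below is a minimal standalone sketch of that validation pattern. The helper name and the tuple return shape are illustrative only; in the patch the group list comes from CephUtils.get_number_created_gateways() and the failure is returned as pb2.req_status(status=errno.ENODEV, ...).

    import errno
    import logging

    logger = logging.getLogger(__name__)

    def validate_ana_group(anagrpid, existing_groups):
        # anagrpid == 0 means "choose a group for me automatically"; any other
        # value must already be known to the cluster, otherwise fail with ENODEV.
        if anagrpid == 0:
            return True, 0, ""
        if anagrpid not in existing_groups:
            msg = f"Load balancing group {anagrpid} doesn't exist"
            logger.error(msg)
            return False, errno.ENODEV, msg
        return True, 0, ""

    # existing_groups would come from CephUtils.get_number_created_gateways()
    assert validate_ana_group(100, [1, 2])[1] == errno.ENODEV
    assert validate_ana_group(2, [1, 2])[0] is True
    assert validate_ana_group(0, [])[0] is True

Group id 0 is reserved for automatic selection, which is why namespace_add_safe only validates non-zero ids and otherwise falls through to choose_anagrpid_for_namespace().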
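The test fixtures now register each gateway with the Ceph monitor ("nvme-gw create") right after serve(), so that at least one ANA group exists before namespaces are added; cephutils.py gains debug logging around the same mon_command path. A sketch of that call built with json.dumps instead of string concatenation is shown below; the conffile/rados_id arguments are placeholders for whatever the gateway config supplies, and the "nvme-gw create" command itself requires a Ceph build that ships the NVMe-oF gateway monitor support.

    import json
    import rados  # python3-rados, the same binding cephutils.py uses

    def register_nvme_gateway(conffile, rados_id, gw_name, pool, group=""):
        # Same monitor command the fixtures assemble by string concatenation,
        # built with json.dumps to avoid quoting mistakes.
        cmd = json.dumps({"prefix": "nvme-gw create",
                          "id": gw_name, "pool": pool, "group": group})
        with rados.Rados(conffile=conffile, rados_id=rados_id) as cluster:
            ret, outbuf, outs = cluster.mon_command(cmd, b'')
        return ret, outs

    # Example (paths and ids are placeholders):
    # register_nvme_gateway("/etc/ceph/ceph.conf", "admin", "GatewayAA", "rbd")

mon_command() returns a (return code, output buffer, status string) tuple, which is what execute_ceph_monitor_command() now logs at debug level and passes back to its callers.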
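The new two_gateways fixture in tests/test_cli.py runs two GatewayServer instances in one test process by deep-copying the shared config and giving each copy a unique name, gRPC port, discovery port and SPDK RPC socket. The helper below is hypothetical (the fixture does this inline) and assumes the config object behaves like the gateway's config wrapper, exposing a .config dict of dicts and getint().

    import copy

    def derive_gateway_config(base_config, name, port_offset, sock_name):
        # One clone per extra gateway: unique identity, ports and RPC socket
        # so two servers can coexist in the same test process.
        cfg = copy.deepcopy(base_config)
        cfg.config["gateway"]["name"] = name
        cfg.config["gateway"]["override_hostname"] = name
        cfg.config["gateway"]["port"] = str(base_config.getint("gateway", "port") + port_offset)
        cfg.config["discovery"]["port"] = str(base_config.getint("discovery", "port") + port_offset)
        cfg.config["spdk"]["rpc_socket_name"] = sock_name
        cfg.config["spdk"]["tgt_cmd_extra_args"] = "--disable-cpumask-locks"
        return cfg

Keeping the per-gateway differences in one place makes it easier to see why the fixture also registers each gateway with the monitor under its own name before the namespace tests run.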