From 5eb266acf4971d10712bc6b65856d348f29aaa54 Mon Sep 17 00:00:00 2001 From: Arvindsrinivasan Lakshmi Narasimhan <55814491+arlakshm@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:40:31 -0700 Subject: [PATCH 01/13] [chassis] Modify the show ip route to hide the Ethernet-IB port in the output (#3537) * update show ip route for voq chassis Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan * add UT * add more UT * Fix linter errors * fix UT * make linter happy --------- Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- show/bgp_common.py | 25 ++-- tests/conftest.py | 15 ++- tests/ip_show_routes_multi_asic_test.py | 3 +- tests/ip_show_routes_voq_chassis_test.py | 112 ++++++++++++++++++ tests/mock_tables/asic0/ip_route_lc.json | 66 +++++++++++ tests/mock_tables/asic0/ip_route_lc_2.json | 56 +++++++++ .../mock_tables/asic0/ip_route_remote_lc.json | 106 +++++++++++++++++ tests/mock_tables/asic1/ip_route_lc.json | 106 +++++++++++++++++ tests/mock_tables/asic1/ip_route_lc_2.json | 56 +++++++++ .../mock_tables/asic1/ip_route_remote_lc.json | 106 +++++++++++++++++ tests/show_ip_route_common.py | 57 +++++++++ 11 files changed, 698 insertions(+), 10 deletions(-) create mode 100644 tests/ip_show_routes_voq_chassis_test.py create mode 100644 tests/mock_tables/asic0/ip_route_lc.json create mode 100644 tests/mock_tables/asic0/ip_route_lc_2.json create mode 100644 tests/mock_tables/asic0/ip_route_remote_lc.json create mode 100644 tests/mock_tables/asic1/ip_route_lc.json create mode 100644 tests/mock_tables/asic1/ip_route_lc_2.json create mode 100644 tests/mock_tables/asic1/ip_route_remote_lc.json diff --git a/show/bgp_common.py b/show/bgp_common.py index b51e9f1879..e9c0e12e8a 100644 --- a/show/bgp_common.py +++ b/show/bgp_common.py @@ -3,7 +3,7 @@ import json import utilities_common.multi_asic as multi_asic_util -from sonic_py_common import multi_asic +from sonic_py_common import device_info, multi_asic from utilities_common import constants ''' @@ -60,10 +60,12 @@ def 
get_nexthop_info_str(nxhp_info, filterByIp): else: str_2_return = " via {},".format(nxhp_info['ip']) if "interfaceName" in nxhp_info: + intfs = nxhp_info['interfaceName'] if filterByIp: - str_2_return += ", via {}".format(nxhp_info['interfaceName']) + str_2_return += ", via {}".format(intfs) else: - str_2_return += " {},".format(nxhp_info['interfaceName']) + str_2_return += " {},".format(intfs) + elif "directlyConnected" in nxhp_info: str_2_return = " is directly connected," if "interfaceName" in nxhp_info: @@ -80,10 +82,13 @@ def get_nexthop_info_str(nxhp_info, filterByIp): str_2_return += "(vrf {}, {},".format(nxhp_info['vrf'], nxhp_info['interfaceName']) if "active" not in nxhp_info: str_2_return += " inactive" + if "recursive" in nxhp_info: + if device_info.is_voq_chassis(): + str_2_return = " " + str_2_return + " recursive via iBGP" + else: + str_2_return += " (recursive)" if "onLink" in nxhp_info: str_2_return += " onlink" - if "recursive" in nxhp_info: - str_2_return += " (recursive)" if "source" in nxhp_info: str_2_return += ", src {}".format(nxhp_info['source']) if "labels" in nxhp_info: @@ -220,6 +225,12 @@ def merge_to_combined_route(combined_route, route, new_info_l): if nh['interfaceName'] == combined_route[route][j]['nexthops'][y]['interfaceName']: found = True break + if device_info.is_voq_chassis(): + if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: + if 'interfaceName' not in combined_route[route][j]['nexthops'][y]: + combined_route[route][j]['nexthops'][y] = nh + found = True + break elif "active" not in nh and "active" not in combined_route[route][j]['nexthops'][y]: if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: found = True @@ -253,7 +264,7 @@ def process_route_info(route_info, device, filter_back_end, print_ns_str, asic_c while len(new_info['nexthops']): nh = new_info['nexthops'].pop() if filter_back_end and back_end_intf_set != None and "interfaceName" in nh: - if nh['interfaceName'] in back_end_intf_set: + if 
nh['interfaceName'] in back_end_intf_set or nh['interfaceName'].startswith('Ethernet-IB'): del_cnt += 1 else: new_nhop_l.append(copy.deepcopy(nh)) @@ -327,6 +338,7 @@ def show_routes(args, namespace, display, verbose, ipver): if display not in ['frontend', 'all']: print("dislay option '{}' is not a valid option.".format(display)) return + device = multi_asic_util.MultiAsic(display, namespace) arg_strg = "" found_json = 0 @@ -376,7 +388,6 @@ def show_routes(args, namespace, display, verbose, ipver): # Need to add "ns" to form bgpX so it is sent to the correct bgpX docker to handle the request cmd = "show {} route {}".format(ipver, arg_strg) output = bgp_util.run_bgp_show_command(cmd, ns) - # in case no output or something went wrong with user specified cmd argument(s) error it out # error from FRR always start with character "%" if output == "": diff --git a/tests/conftest.py b/tests/conftest.py index 5dd31d523a..3874668a67 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -194,7 +194,11 @@ def setup_single_bgp_instance(request): elif request.param == 'ipv6_route': bgp_mocked_json = 'ipv6_route.json' elif request.param == 'ip_special_route': - bgp_mocked_json = 'ip_special_route.json' + bgp_mocked_json = 'ip_special_route.json' + elif request.param == 'ip_route_lc': + bgp_mocked_json = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + bgp_mocked_json = 'ip_route_remote_lc.json' else: bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'dummy.json') @@ -240,7 +244,8 @@ def mock_run_bgp_route_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constan _old_run_bgp_command = bgp_util.run_bgp_command if any([request.param == 'ip_route', request.param == 'ip_specific_route', request.param == 'ip_special_route', - request.param == 'ipv6_route', request.param == 'ipv6_specific_route']): + request.param == 'ipv6_route', request.param == 'ipv6_specific_route', + request.param == 'ip_route_lc', request.param == 'ip_route_remote_lc']): 
bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_bgp_route_command("", "")) elif request.param.startswith('ipv6_route_err'): @@ -303,6 +308,12 @@ def setup_multi_asic_bgp_instance(request): request.param.startswith('bgp_v4_neighbor') or \ request.param.startswith('bgp_v6_neighbor'): m_asic_json_file = request.param + elif request.param == 'ip_route_lc': + m_asic_json_file = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + m_asic_json_file = 'ip_route_remote_lc.json' + elif request.param == 'ip_route_lc_2': + m_asic_json_file = 'ip_route_lc_2.json' else: m_asic_json_file = os.path.join( test_path, 'mock_tables', 'dummy.json') diff --git a/tests/ip_show_routes_multi_asic_test.py b/tests/ip_show_routes_multi_asic_test.py index bfce5e539d..08bea36910 100644 --- a/tests/ip_show_routes_multi_asic_test.py +++ b/tests/ip_show_routes_multi_asic_test.py @@ -1,10 +1,11 @@ import os from importlib import reload - import pytest + from . import show_ip_route_common from click.testing import CliRunner + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") diff --git a/tests/ip_show_routes_voq_chassis_test.py b/tests/ip_show_routes_voq_chassis_test.py new file mode 100644 index 0000000000..de7f7ade8f --- /dev/null +++ b/tests/ip_show_routes_voq_chassis_test.py @@ -0,0 +1,112 @@ +import os +from importlib import reload +import pytest +from unittest import mock + +import show.main as show +from . 
import show_ip_route_common +import utilities_common.multi_asic as multi_asic_util +from click.testing import CliRunner + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") + + +class TestMultiAsicVoqLcShowIpRouteDisplayAllCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import mock_multi_asic + reload(mock_multi_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + 
@mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc_def_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc_default_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc_2'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + @mock.patch.object(multi_asic_util.MultiAsic, "get_ns_list_based_on_options", + mock.MagicMock(return_value=["asic0", "asic1"])) + def test_voq_chassis_lc_def_route_2( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + 
os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + reload(mock_single_asic) diff --git a/tests/mock_tables/asic0/ip_route_lc.json b/tests/mock_tables/asic0/ip_route_lc.json new file mode 100644 index 0000000000..19cfd5e5f0 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc.json @@ -0,0 +1,66 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 4, + "internalNextHopActiveNum": 4, + "nexthopGroupId": 566, + "installedNexthopGroupId": 566, + "uptime": "04w0d11h", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 2, + "interfaceName": "PortChannel1", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 4, + "interfaceName": "PortChannel5", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 5, + "interfaceName": "PortChannel9", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 3, + "interfaceName": "PortChannel13", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_lc_2.json b/tests/mock_tables/asic0/ip_route_lc_2.json new file mode 100644 index 0000000000..8cadf1db22 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, 
+ "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2122, + "installedNexthopGroupId": 2122, + "uptime": "01:01:51", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel102", + "active": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "10.0.0.7", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_remote_lc.json b/tests/mock_tables/asic0/ip_route_remote_lc.json new file mode 100644 index 0000000000..0e8f4a56c7 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + 
"active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_lc.json b/tests/mock_tables/asic1/ip_route_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": 
true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_lc_2.json b/tests/mock_tables/asic1/ip_route_lc_2.json new file mode 100644 index 0000000000..f7dff5d51b --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2173, + "installedNexthopGroupId": 2173, + "uptime": "01:01:57", + "nexthops": [ + { + "flags": 5, + "ip": "10.0.0.1", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel106", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git 
a/tests/mock_tables/asic1/ip_route_remote_lc.json b/tests/mock_tables/asic1/ip_route_remote_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No 
newline at end of file diff --git a/tests/show_ip_route_common.py b/tests/show_ip_route_common.py index 101b23309c..899915a1f4 100644 --- a/tests/show_ip_route_common.py +++ b/tests/show_ip_route_common.py @@ -875,3 +875,60 @@ Totals 6467 6466 """ + +SHOW_IP_ROUTE_REMOTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B> 0.0.0.0/0 [200/0] via 20.1.24.128, recursive via iBGP 04w0d12h + via 20.1.16.128, recursive via iBGP 04w0d12h + via 20.1.8.128, recursive via iBGP 04w0d12h + via 20.1.0.128, recursive via iBGP 04w0d12h +""" + +SHOW_IP_ROUTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B>*0.0.0.0/0 [20/0] via 20.1.24.128, PortChannel13, 04w0d11h + * via 20.1.16.128, PortChannel9, 04w0d11h + * via 20.1.8.128, PortChannel5, 04w0d11h + * via 20.1.0.128, PortChannel1, 04w0d11h +""" + +SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 200, metric 0, best + Last update 04w0d12h ago + * 20.1.24.128 recursive via iBGP + * 20.1.16.128 recursive via iBGP + * 20.1.8.128 recursive via iBGP + * 20.1.0.128 recursive via iBGP + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 04w0d11h ago + * 20.1.24.128, via PortChannel13 + * 20.1.16.128, via PortChannel9 + * 20.1.8.128, via PortChannel5 + * 20.1.0.128, via PortChannel1 + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 01:01:51 
ago + * 10.0.0.7, via PortChannel106 + * 10.0.0.1, via PortChannel102 + +""" From d103bfd7247d90703d7dd6680d21440b34fbed34 Mon Sep 17 00:00:00 2001 From: Vivek Date: Mon, 9 Sep 2024 09:43:42 -0700 Subject: [PATCH 02/13] Fix ntp conf file path (#3525) --- show/main.py | 2 +- tests/ntp.conf | 37 +++++++++++++++++++++++++++++++++++++ tests/show_test.py | 14 ++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 tests/ntp.conf diff --git a/show/main.py b/show/main.py index ac9557a6ef..25202e1e42 100755 --- a/show/main.py +++ b/show/main.py @@ -1545,7 +1545,7 @@ def ntp(verbose): """Show NTP running configuration""" ntp_servers = [] ntp_dict = {} - with open("/etc/ntp.conf") as ntp_file: + with open("/etc/ntpsec/ntp.conf") as ntp_file: data = ntp_file.readlines() for line in data: if line.startswith("server "): diff --git a/tests/ntp.conf b/tests/ntp.conf new file mode 100644 index 0000000000..58bf276dce --- /dev/null +++ b/tests/ntp.conf @@ -0,0 +1,37 @@ +############################################################################### +# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY. +# Controlled by ntp-config.service +############################################################################### + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +# To avoid ntpd from panic and exit if the drift between new time and +# current system time is large. +tinker panic 0 + +driftfile /var/lib/ntpsec/ntp.drift +leapfile /usr/share/zoneinfo/leap-seconds.list + +server 10.1.1.1 iburst +restrict 10.1.1.1 kod limited nomodify noquery + +server 10.22.1.12 iburst +restrict 10.22.1.12 kod limited nomodify noquery + + +interface ignore wildcard + + +interface listen eth0 +interface listen 127.0.0.1 + + +# Access control configuration +# By default, exchange time with everybody, but don't allow configuration. 
+# NTPsec doesn't establish peer associations, and so nopeer has no effect, and +# has been removed from here +restrict default kod nomodify noquery limited + +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 diff --git a/tests/show_test.py b/tests/show_test.py index 4cd29ac45e..d81192367a 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1064,6 +1064,20 @@ def test_rc_syslog(self, mock_rc): assert result.exit_code == 0 assert '[1.1.1.1]' in result.output + @patch('builtins.open', mock_open( + read_data=open('tests/ntp.conf').read())) + def test_ntp(self): + runner = CliRunner() + + result = runner.invoke( + show.cli.commands['runningconfiguration'].commands['ntp']) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert '10.1.1.1' in result.output + assert '10.22.1.12' in result.output + @classmethod def teardown_class(cls): print('TEARDOWN') From 5fdc1b61c8c58582c8cbc1845eddc8840769e380 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Mon, 9 Sep 2024 20:09:42 +0300 Subject: [PATCH 03/13] [Mellanox] Add new SKU Mellanox-SN5600-C256 (#3431) - What I did Update sonic-utilities to support new SKU Mellanox-SN5600-C256 Add the SKU to the generic configuration updater Simplify the logic of the buffer migrator to support the new SKU - How I did it - How to verify it Manual and unit tests --- generic_config_updater/gcu_field_operation_validators.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index a379e7282f..622d73a68f 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", 
"Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], From a7897d1fd43603272f99dd7daaa08dc28f5bda7a Mon Sep 17 00:00:00 2001 From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:05:41 -0700 Subject: [PATCH 04/13] [show][interface][counters] Add proposal and changes for fec-histogram for interface counters fec-histogram subcommand (#3519) * [show][interface][counters] Add proposal and changes for fec-histogram for show int counters fec-histogram subcommand Signed-off-by: Vaibhav Dahiya * add implementation Signed-off-by: Vaibhav Dahiya * add changes Signed-off-by: Vaibhav Dahiya * add changes Signed-off-by: Vaibhav Dahiya * add UT Signed-off-by: Vaibhav Dahiya * fix test Signed-off-by: Vaibhav Dahiya * correct doc Signed-off-by: Vaibhav Dahiya * add changes Signed-off-by: Vaibhav Dahiya * add cosmetic fix Signed-off-by: Vaibhav Dahiya * add fixes Signed-off-by: Vaibhav Dahiya * pep 8 Signed-off-by: Vaibhav Dahiya * add indentation Signed-off-by: Vaibhav Dahiya --------- Signed-off-by: Vaibhav Dahiya --- doc/Command-Reference.md | 34 +++++++++++++++ show/interfaces/__init__.py | 70 
++++++++++++++++++++++++++++++ tests/mock_tables/counters_db.json | 18 +++++++- tests/portstat_test.py | 30 +++++++++++++ 4 files changed, 151 insertions(+), 1 deletion(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index cdc3f5644d..be0bd14fdd 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -4806,6 +4806,7 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte show interfaces counters errors show interfaces counters rates show interfaces counters rif [-p|--period ] [-i ] + show interfaces counters fec-histogram [-i ] ``` - Example: @@ -4923,6 +4924,39 @@ Optionally, you can specify a period (in seconds) with which to gather counters admin@sonic:~$ sonic-clear rifcounters ``` +The "fec-histogram" subcommand is used to display the fec histogram for the port. + +When data is transmitted, it's broken down into units called codewords. FEC algorithms add extra data to each codeword that can be used to detect and correct errors in transmission. +In a FEC histogram, "bins" represent ranges of errors or specific categories of errors. For instance, Bin0 might represent codewords with no errors, while Bin1 could represent codewords with a single bit error, and so on. The histogram shows how many codewords fell into each bin. A high number in the higher bins might indicate a problem with the transmission link, such as signal degradation. + +- Example: + ``` + admin@str-s6000-acs-11:/usr/bin$ show interface counters fec-histogram -i + +Symbol Errors Per Codeword Codewords +-------------------------- --------- +BIN0: 1000000 +BIN1: 900000 +BIN2: 800000 +BIN3: 700000 +BIN4: 600000 +BIN5: 500000 +BIN6: 400000 +BIN7: 300000 +BIN8: 0 +BIN9: 0 +BIN10: 0 +BIN11: 0 +BIN12: 0 +BIN13: 0 +BIN14: 0 +BIN15: 0 + + ``` + + + + **show interfaces description** This command displays the key fields of the interfaces such as Operational Status, Administrative Status, Alias and Description. 
diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 9287eb5af7..f8889e6c32 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -18,6 +18,8 @@ HWSKU_JSON = 'hwsku.json' +REDIS_HOSTIP = "127.0.0.1" + # Read given JSON file def readJsonFile(fileName): try: @@ -646,6 +648,74 @@ def fec_stats(verbose, period, namespace, display): clicommon.run_command(cmd, display_cmd=verbose) + +def get_port_oid_mapping(): + ''' Returns dictionary of all ports interfaces and their OIDs. ''' + db = SonicV2Connector(host=REDIS_HOSTIP) + db.connect(db.COUNTERS_DB) + + port_oid_map = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP') + + db.close(db.COUNTERS_DB) + + return port_oid_map + + +def fetch_fec_histogram(port_oid_map, target_port): + ''' Fetch and display FEC histogram for the given port. ''' + asic_db = SonicV2Connector(host=REDIS_HOSTIP) + asic_db.connect(asic_db.ASIC_DB) + + config_db = ConfigDBConnector() + config_db.connect() + + counter_db = SonicV2Connector(host=REDIS_HOSTIP) + counter_db.connect(counter_db.COUNTERS_DB) + + if target_port not in port_oid_map: + click.echo('Port {} not found in COUNTERS_PORT_NAME_MAP'.format(target_port), err=True) + raise click.Abort() + + port_oid = port_oid_map[target_port] + asic_db_kvp = counter_db.get_all(counter_db.COUNTERS_DB, 'COUNTERS:{}'.format(port_oid)) + + if asic_db_kvp is not None: + + fec_errors = {f'BIN{i}': asic_db_kvp.get + (f'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S{i}', '0') for i in range(16)} + + # Prepare the data for tabulation + table_data = [(bin_label, error_value) for bin_label, error_value in fec_errors.items()] + + # Define headers + headers = ["Symbol Errors Per Codeword", "Codewords"] + + # Print FEC histogram using tabulate + click.echo(tabulate(table_data, headers=headers)) + else: + click.echo('No kvp found in ASIC DB for port {}, exiting'.format(target_port), err=True) + raise click.Abort() + + asic_db.close(asic_db.ASIC_DB) + 
config_db.close(config_db.CONFIG_DB) + counter_db.close(counter_db.COUNTERS_DB) + + +# 'fec-histogram' subcommand ("show interfaces counters fec-histogram") +@counters.command('fec-histogram') +@multi_asic_util.multi_asic_click_options +@click.argument('interfacename', required=True) +def fec_histogram(interfacename, namespace, display): + """Show interface counters fec-histogram""" + port_oid_map = get_port_oid_mapping() + + # Try to convert interface name from alias + interfacename = try_convert_interfacename_from_alias(click.get_current_context(), interfacename) + + # Fetch and display the FEC histogram + fetch_fec_histogram(port_oid_map, interfacename) + + # 'rates' subcommand ("show interfaces counters rates") @counters.command() @click.option('-p', '--period') diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 2f16c7014d..9e553c2901 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -882,7 +882,23 @@ "SAI_PORT_STAT_ETHER_STATS_JABBERS": "0", "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "130402", "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "3", - "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4" + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0": "1000000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1": "900000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2": "800000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3": "700000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4": "600000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5": "500000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6": "400000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7": "300000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12": "0", + 
"SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15": "0" }, "COUNTERS:oid:0x1000000000013": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "4", diff --git a/tests/portstat_test.py b/tests/portstat_test.py index 3af704e66e..af9814f812 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -42,6 +42,27 @@ Ethernet8 N/A 100,317 0 0 """ +intf_fec_counters_fec_hist = """\ +Symbol Errors Per Codeword Codewords +---------------------------- ----------- +BIN0 1000000 +BIN1 900000 +BIN2 800000 +BIN3 700000 +BIN4 600000 +BIN5 500000 +BIN6 400000 +BIN7 300000 +BIN8 0 +BIN9 0 +BIN10 0 +BIN11 0 +BIN12 0 +BIN13 0 +BIN14 0 +BIN15 0 +""" + intf_fec_counters_period = """\ The rates are calculated within 3 seconds period IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR @@ -337,6 +358,15 @@ def test_show_intf_fec_counters(self): assert return_code == 0 assert result == intf_fec_counters + def test_show_intf_counters_fec_histogram(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"].commands["fec-histogram"], ["Ethernet0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_fec_counters_fec_hist + def test_show_intf_fec_counters_period(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["fec-stats"], From 8fa076d2fe7871ab0e2f34e352128de4dda51bd1 Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Wed, 11 Sep 2024 07:22:32 +0200 Subject: [PATCH 05/13] sonic-installer: enhance next image detection for Aboot (#3433) The Aboot bootloader relies of the SWI= keyword argument in the boot-config file to know which image to boot. This value is also used by sonic-installer to figure to extract the next image that will be executed. 
The current code has an issue as it only expects the next image to match the installation path of a SONiC image but not anything else. This means that `SWI=flash:sonic-aboot-broadcom.swi` is not valid and can therefore be a problem when trying to install a new image via cold reboot. Additionally a missing or empty boot-config would generate a python backtrace instead of gracefully recovering from this state. --- sonic_installer/bootloader/aboot.py | 7 ++++++- tests/installer_bootloader_aboot_test.py | 21 +++++++++++++++++---- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ac327feb4c..d6492171ab 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -71,6 +71,8 @@ class AbootBootloader(Bootloader): def _boot_config_read(self, path=BOOT_CONFIG_PATH): config = collections.OrderedDict() + if not os.path.exists(path): + return config with open(path) as f: for line in f.readlines(): line = line.strip() @@ -112,7 +114,10 @@ def get_installed_images(self): def get_next_image(self): config = self._boot_config_read() - match = re.search(r"flash:/*(\S+)/", config['SWI']) + swi = config.get('SWI', '') + match = re.search(r"flash:/*(\S+)/", swi) + if not match: + return swi.split(':', 1)[-1] return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def set_default_image(self, image): diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py index fbe580a638..be09223b5f 100644 --- a/tests/installer_bootloader_aboot_test.py +++ b/tests/installer_bootloader_aboot_test.py @@ -8,6 +8,7 @@ # Constants image_dir = f'{aboot.IMAGE_DIR_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' +image_chainloader = f'{image_dir}/.sonic-boot.swi' exp_image = f'{aboot.IMAGE_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' image_dirs = [image_dir] @@ -45,15 +46,27 @@ def test_get_installed_images(): assert 
bootloader.get_installed_images() == [exp_image] -@patch("sonic_installer.bootloader.aboot.re.search") -def test_get_next_image(re_search_patch): +def test_get_next_image(): bootloader = aboot.AbootBootloader() - bootloader._boot_config_read = Mock(return_value={'SWI': None}) + + # Test missing boot-config + bootloader._boot_config_read() + + # Test missing SWI value + bootloader._boot_config_read = Mock(return_value={}) + assert bootloader.get_next_image() == '' # Test convertion image dir to image name - re_search_patch().group = Mock(return_value=image_dir) + swi = f'flash:{image_chainloader}' + bootloader._boot_config_read = Mock(return_value={'SWI': swi}) assert bootloader.get_next_image() == exp_image + # Test some other image + next_image = 'EOS.swi' + bootloader._boot_config_read = Mock(return_value={'SWI': f'flash:{next_image}'}) + assert bootloader.get_next_image() == next_image + + def test_install_image(): image_path = 'sonic' env = os.environ.copy() From ad5b0c0aae083156b0b7c0dac10ebbb3cd4c9e07 Mon Sep 17 00:00:00 2001 From: noaOrMlnx <58519608+noaOrMlnx@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:06:52 +0300 Subject: [PATCH 06/13] [Mellanox] Add SPC5 to generic config updater file (#3542) --- .../gcu_field_operation_validators.conf.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 622d73a68f..8b42812af0 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,8 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", 
"Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ], + "spc5": ["ACS-SN5640"] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], From 1aac5e2c37c26ae5000820a117427438c5f511d4 Mon Sep 17 00:00:00 2001 From: Vineet Mittal <46945843+vmittal-msft@users.noreply.github.com> Date: Wed, 11 Sep 2024 09:38:07 -0700 Subject: [PATCH 07/13] [VoQ chassis] : Script to debug packet drops (#3536) * Script to debug packet loss on VoQ chassis * Updated setup.py with new script * Fixed the order of the script * Fixed error in pre-check --- scripts/debug_voq_chassis_packet_drops.sh | 371 ++++++++++++++++++++++ setup.py | 1 + 2 files changed, 372 insertions(+) create mode 100755 scripts/debug_voq_chassis_packet_drops.sh diff --git a/scripts/debug_voq_chassis_packet_drops.sh b/scripts/debug_voq_chassis_packet_drops.sh new file mode 100755 index 0000000000..53e21c6f09 --- /dev/null +++ b/scripts/debug_voq_chassis_packet_drops.sh @@ -0,0 +1,371 @@ +#!/usr/bin/bash +# defaults for env vars +sleep_period=${sleep_period:-0} +maxiter=${maxiter:-25} # all but 4 iterations will be polling Egress drops +log=${log:-/dev/stdout} +time_format="%D %T.%6N" +delim="END" +# options +ing_check_mc=${ing_check_mc:-1} +ing_check_macsec=${ing_check_macsec:-1} +egr_check_mc=${egr_check_mc:-1} +egr_check_pmf_hit_bits=${egr_check_pmf_hit_bits:-1} +egr_diag_counter_g=${egr_diag_counter_g:-1} + +declare 
-a cores=("0" "1") +declare -a asics=("0" "1") +queue_pair_mask_a=(0 0 0 0) +dsp_map_a=(0 0 0 0) + +timestamp(){ + curr_time=$(date +"$time_format") + echo "$curr_time $logmsg" >> $log +} + +print_pqp_reasons() { + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons & 1)) -ne 0 ] ; then echo "0- Total PDs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total PDs UC pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- Per port UC PDs threshold" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- Per queue UC PDs thresholds">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- Per port UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Per queue UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Per queue disable bit">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- Undefined">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Total PDs MC pool size threshold">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Per interface PDs threhold">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- MC SP threshold">> $log ; fi + if [ $(($disc_reasons & 2048)) -ne 0 ] ; then echo "11- per MC-TC threshold">> $log ; fi + if [ $(($disc_reasons & 4096)) -ne 0 ] ; then echo "12- MC PDs per port threshold">> $log ; fi + if [ $(($disc_reasons & 8192)) -ne 0 ] ; then echo "13- MC PDs per queue threshold">> $log ; fi + if [ $(($disc_reasons & 16384)) -ne 0 ] ; then echo "14- MC per port size (bytes) threshold">> $log ; fi + if [ $(($disc_reasons & 32768)) -ne 0 ] ; then echo "15- MC per queue size(bytes) thresholds">> $log ; fi +} +print_rqp_reasons(){ + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons 
& 1)) -ne 0 ] ; then echo "0- Total DBs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total UC DBs pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- UC packet discarded in EMR because UC FIFO is full" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- MC HP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- MC LP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Total MC DBs pool size threshold violated">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Packet-DP is not eligible to take from shared DBs resources">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- USP DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Discrete-Partitioning method: MC-TC DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Strict-priority method: MC-TC mapped to SP0 DBs threshold violated">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- Strict-Priority method: MC-TC mapped to SP1 DBs threshold violated">> $log ; fi +} + +# whenever port_disabled mask change, print the up ports +# (according to the queue-pair mask and DSP port mapping, which is what matters ) + +check_new_port_state() { + last_queue_pair_mask=${queue_pair_mask_a[$index]} + queue_pair_mask=$(bcmcmd -n $asic "g hex ECGM_CGM_QUEUE_PAIR_DISABLED.ECGM${core}" | head -n +2 | tail -1) + if [ "$queue_pair_mask" == "$last_queue_pair_mask" ] ; then + return + fi + queue_pair_mask_a[$index]=$queue_pair_mask + logmsg="EGRESS_QPAIR asic $asic core $core new disabled mask: $queue_pair_mask" + timestamp + + start_dsp=$core + let amt=255-$core + dsp_map_a[$index]=$(bcmcmd -n $asic "d SCH_DSP_2_PORT_MAP_DSPP.SCH${core} $start_dsp $amt") + + hr_num=0 + for pos in 
{-3..-129..-2}; do # todo + byte=${queue_pair_mask:pos:2} + if [ $hr_num -le 8 ] ; then + hr_num_hex="HR_NUM=${hr_num}" + else + hr_num_hex=$(printf "HR_NUM=0x%x" $hr_num) + fi + hr_num=$(( hr_num + 8)) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? + if [ $found -eq 1 ] ; then + continue + fi + dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + if [ "$byte" = "ff" ]; then + printf "DOWN %3d ${entry}\n" $dsp_port >> $log + else + printf "UP %3d ${entry}\n" $dsp_port >> $log + fi + done + echo >> $log +} + +decode_last_rqp_drop() { + rqp_disc=$(bcmcmd -n $asic "g hex ECGM_RQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1) + prefix=${rqp_disc: 0: 2} + if [ "$prefix" != "0x" ]; then + return; # empty (0) or a failed read + fi + logmsg="EGRESS_DROP RQP_DISCARD_REASONS asic $asic core $core index $index: $rqp_disc" + timestamp + disc_reasons=${rqp_disc: -4: 3} + print_rqp_reasons +} + +decode_last_pqp_drop() { + pqp_disc=$(bcmcmd -n $asic "g hex ECGM_PQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1 ) + prefix=${pqp_disc: 0: 2} + if [ "$prefix" != "0x" ]; then + return; # empty (0) or a failed read + fi + logmsg="EGRESS_DROP PQP_DISCARD_REASONS asic $asic core $core: $pqp_disc" + timestamp + check_new_port_state # in case the DSP map has changed + disc_reasons=${pqp_disc: -5: 4} + last_reason=${pqp_disc: -9: 4} + drop_cmd=${pqp_disc: -19: 10} + queue=${drop_cmd: -8: 3} + queue=$((16#${queue})) + queue=$(($queue / 4 )) + queue=$(($queue & 248)) + hr_num_hex=$(printf "%02x" $queue) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? 
+ dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + if [ $found -eq 1 ] ; then + echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp_port not_found" >> $log + else + dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp port $dsp_port" >> $log + fi + echo "pqp discard reasons (cumulative since last read):" >> $log + print_pqp_reasons + echo "pqp last packet discard reasons:" >> $log + disc_reasons=$last_reason + print_pqp_reasons + echo >> $log +} + + +clear_tcam_hit_bits() { + cint_filename="/tmp/hitbits" + cint=';print bcm_field_entry_hit_flush(0, BCM_FIELD_ENTRY_HIT_FLUSH_ALL, 0); exit;' + bcmcmd -n $asic "log off; rm $cint_filename;log file=$cint_filename quiet=yes; echo '$cint';log off;cint $cint_filename" >> /dev/null +} + +dump_tcam_drop_action_hits() { + echo "SAI_FG_TRAP hits:" >> $log + bcmcmd -n $asic "dbal table dump Table=SAI_FG_TRAP" | grep "CORE" | awk -F'|' '{print $2,$34}' >> $log + echo "EPMF_Cascade hits:" >> $log + # entries 51,52,53,54,55,56 have drop action + bcmcmd -n $asic "dbal table dump Table=EPMF_Cascade" | grep "CORE" | awk -F'|' '{print $2,$10}'>> $log + clear_tcam_hit_bits +} + +check_egress_drops() { + hit=0 + pqp_uc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_UNICAST_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + erpp_discard=$(bcmcmd -n $asic "g hex PQP_ERPP_DISCARDED_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + rqp_debug_counters=$(bcmcmd -n $asic "g RQP_PRP_DEBUG_COUNTERS.RQP${core}" | head -n -1 | tail -n +2 | sed -e 's/=/ /g'| sed -e 's/,/ /g'|tr -dc "[:alnum:] =_" ) + + pqp_uc_discard=$(printf "%d" $pqp_uc_discard) + erpp_discard=$(printf "%d" $erpp_discard) + + if [ $pqp_uc_discard -ne 0 ]; then + logmsg="EGRESS_DROP UC_DROP on ASIC $asic CORE $core : PQP_DISCARD_UNICAST_PACKET_COUNTER = $pqp_uc_discard" + timestamp + hit=1; + fi + if [ $erpp_discard -ne 0 ]; then + 
logmsg="EGRESS_DROP ERPP_DROP on ASIC $asic CORE $core : PQP_ERPP_DISCARDED_PACKET_COUNTER = $erpp_discard" + timestamp + hit=1; + fi + + sop_discard_uc=$(echo $rqp_debug_counters | awk {'print $4'}) + prp_discard_uc=$(echo $rqp_debug_counters | awk {'print $14'}) + dbf_err_cnt=$(echo $rqp_debug_counters | awk {'print $18'}) + + sop_discard_uc=$(printf "%d" $sop_discard_uc) + prp_discard_uc=$(printf "%d" $prp_discard_uc) + dbf_err_cnt=$(printf "%d" $dbf_err_cnt) + + if [ $sop_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_UC_DISCARD on ASIC $asic CORE $core : $sop_discard_uc" + timestamp + hit=1; + fi + if [ $prp_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_UC_DISCARD on ASIC $asic CORE $core : $prp_discard_uc" + timestamp + hit=1; + fi + if [ $dbf_err_cnt -ne 0 ]; then + logmsg="EGRESS_DROP RQP_DBF_ERR on ASIC $asic CORE $core : $dbf_err_cnt" + timestamp + hit=1; + fi + if [ $egr_check_mc -ne 0 ]; then + sop_discard_mc=$(echo $rqp_debug_counters | awk {'print $6'}) + prp_discard_mc=$(echo $rqp_debug_counters | awk {'print $16'}) + sop_discard_mc=$(printf "%d" $sop_discard_mc) + prp_discard_mc=$(printf "%d" $prp_discard_mc) + + pqp_mc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_MULTICAST_PACKET_COUNTER.PQP${core}" | head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + pqp_mc_discard=$(printf "%d" $pqp_mc_discard) + if [ $pqp_mc_discard -ne 0 ]; then + logmsg="EGRESS_DROP MC_DROP ASIC $asic CORE $core : PQP_DISCARD_MULTICAST_PACKET_COUNTER = $pqp_mc_discard" + timestamp + hit=1; + fi + if [ $sop_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_MC_DISCARD on ASIC $asic CORE $core : $sop_discard_mc" + timestamp + hit=1; + fi + if [ $prp_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_MC_DISCARD on ASIC $asic CORE $core : $prp_discard_mc" + timestamp + hit=1; + fi + fi + if [ $hit -eq 0 ] ; then + return + fi + + decode_last_pqp_drop + # bcmcmd -n $asic "g chg ECGM_RQP_DISCARD_REASONS.ECGM${core}" | grep "=" >> $log + decode_last_rqp_drop 
+ bcmcmd -n $asic "g chg PQP_INTERRUPT_REGISTER.PQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg RQP_INTERRUPT_REGISTER.RQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s PQP_INTERRUPT_REGISTER.PQP${core} -1" > /dev/null + bcmcmd -n $asic "s RQP_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core}"| tail -2 | head -n -1 >> $log + bcmcmd -n $asic "s RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg FDR_INTERRUPT_REGISTER.FDR${core}"| head -n -1 | tail -n +2 >> $log + # FDA0 block is shared by both cores + bcmcmd -n $asic "g chg FDA_INTERRUPT_REGISTER.FDA0"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s FDR_INTERRUPT_REGISTER.FDR${core} -1" > /dev/null + bcmcmd -n $asic "s FDA_INTERRUPT_REGISTER.FDA0 -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2>> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}" >> $log + bcmcmd -n $asic "tm egr con"| head -n -1 | tail -n +2 >> $log + + if [ $egr_check_pmf_hit_bits -eq 1 ]; then + dump_tcam_drop_action_hits + fi + if [ $egr_diag_counter_g -eq 1 ]; then + bcmcmd -n $asic "diag counter g nz core=${core}"| head -n -1 | 
tail -n +2 >> $log + fi + echo "$delim" >> $log + echo >> $log +} + +dump_ingress_traps() { + bcmcmd -n $asic "g IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core}" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core} -1"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g IPPE_DBG_LLR_TRAP_0.IPPE${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPE_DBG_LLR_TRAP_0.IPPE${core} -1"| head -n -1 | tail -n +2 >> $log +} +dump_macsec() { + bcmcmd -n $asic "sec stat show; sec stat clear" >> $log +} + +rjct_filename=rjct_status.txt + +check_ingress_drops() { + hit=0 + bcmcmd -n $asic "getreg chg CGM_REJECT_STATUS_BITMAP.CGM${core}" | awk '{split($0,a,":"); print a[2]}' > $rjct_filename + while read -r line; do + [ -z $line ] && continue + res=$(echo $line | grep -v "," | grep "<>") + if [ -z $res ]; then + hit=1 + fi + done < "$rjct_filename" + + if [ $hit == 1 ]; then + logmsg="INGRESS_DROP asic $asic core $core" + timestamp + cat $rjct_filename >> $log + bcmcmd -n $asic "g CGM_MAX_VOQ_WORDS_QSIZE_TRACK.CGM${core}" | head -n -1 | tail -n +2 >> $log + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "tm ing cong core=$core" >> $log + bcmcmd -n $asic "trap last info core=$core" >> $log + bcmcmd -n $asic "pp vis ppi core=$core" >> $log + bcmcmd -n $asic "pp vis fdt core=$core" >> $log + bcmcmd -n $asic "pp vis ikleap core=$core" >> $log + #bcmcmd -n $asic "pp vis last" >> $log + if [ $ing_check_mc -eq 1 ] ; then + bcmcmd -n $asic "dbal table dump table=mcdb" >> $log + bcmcmd -n $asic "g MTM_ING_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g MTM_EGR_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + fi + bcmcmd -n $asic "diag counter g nz core=${core}" >> $log + echo "" >> $log + dump_ingress_traps + echo "" >> $log + if [ $ing_check_macsec -eq 1 ] ; then + dump_macsec + fi + echo "$delim" >> $log + fi +} + +# clear stats +for asic in "${asics[@]}" 
+do + bcmcmd -n $asic "sec stat clear; clear counter; clear interrupt all" >> /dev/null +done + +iter_a=(0 0 0 0) +while true; +do + for asic in "${asics[@]}" + do + for core in "${cores[@]}" + do + index=$(($asic*2+$core)) + iter=$((${iter_a[$index]}+1)) + if [ $iter -eq $maxiter ] ; then + iter_a[$index]=0; + sleep $sleep_period + continue + fi + iter_a[$index]=$iter + # for majority of polling cycles, check the PQP drop reason and queue + if [ $iter -gt 4 ] ; then + decode_last_pqp_drop + continue + fi + # check for any change in pqp disabled port mask + if [ $iter -eq 1 ] ; then + check_new_port_state + continue + fi + if [ $iter -eq 2 ] ; then + check_egress_drops + continue + fi + if [ $iter -eq 3 ]; then + check_ingress_drops + continue + fi + if [ $iter -eq 4 ]; then + decode_last_rqp_drop + fi + done + done +done + diff --git a/setup.py b/setup.py index 5d0dc0ea35..520530b532 100644 --- a/setup.py +++ b/setup.py @@ -124,6 +124,7 @@ 'scripts/dropstat', 'scripts/dualtor_neighbor_check.py', 'scripts/dump_nat_entries.py', + 'scripts/debug_voq_chassis_packet_drops.sh', 'scripts/ecnconfig', 'scripts/fabricstat', 'scripts/fanshow', From 2cb8cc65b6dc57d9613ce271a681743aa4fa0f3c Mon Sep 17 00:00:00 2001 From: Xinyu Lin Date: Thu, 12 Sep 2024 01:52:56 +0800 Subject: [PATCH 08/13] [sfputil] Configure the debug loopback mode only on the relevant lanes of the logical port (#3485) * [sfputil] Configure the debug loopback mode only on the relevant lanes of the logical port Signed-off-by: xinyu --- doc/Command-Reference.md | 8 ++-- sfputil/main.py | 82 ++++++++++++++++++++++++++++++++++++---- tests/sfputil_test.py | 65 +++++++++++++++++++++++++++---- 3 files changed, 135 insertions(+), 20 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index be0bd14fdd..7697f235f7 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3144,19 +3144,19 @@ This command is the standard CMIS diagnostic control used for troubleshooting li - 
Usage: ``` - sfputil debug loopback PORT_NAME LOOPBACK_MODE + sfputil debug loopback PORT_NAME LOOPBACK_MODE - Set the loopback mode + Valid values for loopback mode host-side-input: host side input loopback mode host-side-output: host side output loopback mode media-side-input: media side input loopback mode media-side-output: media side output loopback mode - none: disable loopback mode ``` - Example: ``` - admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input enable + admin@sonic:~$ sfputil debug loopback Ethernet88 media-side-output disable ``` ## DHCP Relay diff --git a/sfputil/main.py b/sfputil/main.py index 2c8f85d016..58c6855abe 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -18,7 +18,7 @@ import sonic_platform import sonic_platform_base.sonic_sfp.sfputilhelper from sonic_platform_base.sfp_base import SfpBase -from swsscommon.swsscommon import SonicV2Connector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector from natsort import natsorted from sonic_py_common import device_info, logger, multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string @@ -1967,11 +1967,12 @@ def debug(): # 'loopback' subcommand @debug.command() -@click.argument('port_name', required=True, default=None) -@click.argument('loopback_mode', required=True, default="none", - type=click.Choice(["none", "host-side-input", "host-side-output", +@click.argument('port_name', required=True) +@click.argument('loopback_mode', required=True, + type=click.Choice(["host-side-input", "host-side-output", "media-side-input", "media-side-output"])) -def loopback(port_name, loopback_mode): +@click.argument('enable', required=True, type=click.Choice(["enable", "disable"])) +def loopback(port_name, loopback_mode, enable): """Set module diagnostic loopback mode """ physical_port = logical_port_to_physical_port_index(port_name) @@ -1991,17 +1992,82 @@ def 
loopback(port_name, loopback_mode): click.echo("{}: This functionality is not implemented".format(port_name)) sys.exit(ERROR_NOT_IMPLEMENTED) + namespace = multi_asic.get_namespace_for_port(port_name) + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + if config_db is not None: + config_db.connect() + try: + subport = int(config_db.get(config_db.CONFIG_DB, f'PORT|{port_name}', 'subport')) + except TypeError: + click.echo(f"{port_name}: subport is not present in CONFIG_DB") + sys.exit(EXIT_FAIL) + + # If subport is set to 0, assign a default value of 1 to ensure valid subport configuration + if subport == 0: + subport = 1 + else: + click.echo(f"{port_name}: Failed to connect to CONFIG_DB") + sys.exit(EXIT_FAIL) + + state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + if state_db is not None: + state_db.connect(state_db.STATE_DB) + try: + host_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'host_lane_count')) + except TypeError: + click.echo(f"{port_name}: host_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + + try: + media_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'media_lane_count')) + except TypeError: + click.echo(f"{port_name}: media_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + else: + click.echo(f"{port_name}: Failed to connect to STATE_DB") + sys.exit(EXIT_FAIL) + + if 'host-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, host_lane_count) + elif 'media-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, media_lane_count) + else: + lane_mask = 0 + try: - status = api.set_loopback_mode(loopback_mode) + status = api.set_loopback_mode(loopback_mode, + lane_mask=lane_mask, + enable=enable == 'enable') except AttributeError: click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) sys.exit(ERROR_NOT_IMPLEMENTED) + except 
TypeError: + click.echo("{}: Set loopback mode failed. Parameter is not supported".format(port_name)) + sys.exit(EXIT_FAIL) if status: - click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + click.echo("{}: {} {} loopback".format(port_name, enable, loopback_mode)) else: - click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + click.echo("{}: {} {} loopback failed".format(port_name, enable, loopback_mode)) sys.exit(EXIT_FAIL) + +def get_subport_lane_mask(subport, lane_count): + """Get the lane mask for the given subport and lane count + + Args: + subport (int): Subport number + lane_count (int): Lane count for the subport + + Returns: + int: Lane mask for the given subport and lane count + """ + return ((1 << lane_count) - 1) << ((subport - 1) * lane_count) + + if __name__ == '__main__': cli() diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 0e58daa18e..d8d13df1c0 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -1631,11 +1631,16 @@ def test_load_port_config(self, mock_is_multi_asic): @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.ConfigDBConnector') + @patch('sfputil.main.SonicV2Connector') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) - def test_debug_loopback(self, mock_chassis): + @patch('sonic_py_common.multi_asic.get_front_end_namespaces', MagicMock(return_value=[''])) + def test_debug_loopback(self, mock_sonic_v2_connector, mock_config_db_connector, mock_chassis): mock_sfp = MagicMock() mock_api = MagicMock() + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = MagicMock() mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) mock_sfp.get_presence.return_value = True mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) @@ 
-1643,31 +1648,75 @@ def test_debug_loopback(self, mock_chassis): runner = CliRunner() mock_sfp.get_presence.return_value = False result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: SFP EEPROM not detected\n' mock_sfp.get_presence.return_value = True mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: This functionality is not implemented\n' assert result.exit_code == ERROR_NOT_IMPLEMENTED mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) - assert result.output == 'Ethernet0: Set host-side-input loopback\n' + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: enable host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: enable media-side-input loopback\n' assert result.exit_code != ERROR_NOT_IMPLEMENTED mock_api.set_loopback_mode.return_value = False result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "none"]) - assert result.output == 'Ethernet0: Set none loopback failed\n' + ["Ethernet0", "media-side-output", "enable"]) + assert result.output == 'Ethernet0: enable media-side-output loopback failed\n' assert result.exit_code == EXIT_FAIL mock_api.set_loopback_mode.return_value = True mock_api.set_loopback_mode.side_effect = AttributeError result = 
runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "none"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.side_effect = [TypeError, True] + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: Set loopback mode failed. Parameter is not supported\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db = MagicMock() + mock_config_db.get.side_effect = TypeError + mock_config_db_connector.return_value = mock_config_db + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: subport is not present in CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to STATE_DB\n' + assert result.exit_code == EXIT_FAIL + + @pytest.mark.parametrize("subport, lane_count, expected_mask", [ + (1, 1, 0x1), + (1, 4, 0xf), + (2, 1, 0x2), + (2, 4, 0xf0), + (3, 2, 0x30), + (4, 1, 0x8), + ]) + def test_get_subport_lane_mask(self, subport, lane_count, expected_mask): + assert sfputil.get_subport_lane_mask(subport, lane_count) == expected_mask From 5fc0ee6c79951f5a8a0e939fd4fe10183b02e23c Mon Sep 17 00:00:00 2001 From: Nazarii 
Hnydyn Date: Mon, 16 Sep 2024 02:48:13 +0300 Subject: [PATCH 09/13] [spm]: Clean up timers auto generation logic. (#3523) Config Reload Enhancements PR https://github.com/sonic-net/SONiC/pull/1203 does not completely remove TIMERs from SONiC Package Manager infra. This PR is intended to complete the original changes. `Systemd` TIMERs infra was replaced by `hostcfgd` service management. That was done to improve reliability of service management. #### What I did * Removed redundant TIMERs infra #### How I did it * Updated SPM auto generation logic #### How to verify it 1. Install application extension ```bash spm install --from-tarball ``` 2. Make sure `delayed` flag is set ```bash docker image inspect | jq '.[].Config.Labels["com.azure.sonic.manifest"]' | python -c 'import sys,ast; print(ast.literal_eval(sys.stdin.read()))' | jq .service.delayed true ``` 3. Check no TIMERs were generated --- sonic-utilities-data/templates/timer.unit.j2 | 19 ---------- .../service_creator/creator.py | 20 +---------- .../service_creator/feature.py | 3 +- tests/sonic_package_manager/conftest.py | 1 - .../test_service_creator.py | 35 ------------------- 5 files changed, 2 insertions(+), 76 deletions(-) delete mode 100644 sonic-utilities-data/templates/timer.unit.j2 diff --git a/sonic-utilities-data/templates/timer.unit.j2 b/sonic-utilities-data/templates/timer.unit.j2 deleted file mode 100644 index 09989f2c51..0000000000 --- a/sonic-utilities-data/templates/timer.unit.j2 +++ /dev/null @@ -1,19 +0,0 @@ -# -# =============== Managed by SONiC Package Manager. DO NOT EDIT! 
=============== -# auto-generated from {{ source }} by sonic-package-manager -# -[Unit] -Description=Delays {{ manifest.service.name }} until SONiC has started -PartOf={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Timer] -OnUnitActiveSec=0 sec -OnBootSec=3min 30 sec -Unit={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Install] -WantedBy=timers.target sonic.target sonic-delayed.target -{%- for service in manifest.service["wanted-by"] %} -WantedBy={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service -{%- endfor %} - diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py index 57f8ac4624..c88e96a44a 100644 --- a/sonic_package_manager/service_creator/creator.py +++ b/sonic_package_manager/service_creator/creator.py @@ -31,7 +31,6 @@ SERVICE_FILE_TEMPLATE = 'sonic.service.j2' -TIMER_UNIT_TEMPLATE = 'timer.unit.j2' SYSTEMD_LOCATION = '/usr/lib/systemd/system' ETC_SYSTEMD_LOCATION = '/etc/systemd/system' @@ -305,7 +304,7 @@ def generate_service_mgmt(self, package: Package): log.info(f'generated {script_path}') def generate_systemd_service(self, package: Package): - """ Generates systemd service(s) file and timer(s) (if needed) for package. + """ Generates systemd service(s) file for package. Args: package: Package object to generate service for. 
@@ -333,23 +332,6 @@ def generate_systemd_service(self, package: Package): render_template(template, output_file, template_vars) log.info(f'generated {output_file}') - if package.manifest['service']['delayed']: - template_vars = { - 'source': get_tmpl_path(TIMER_UNIT_TEMPLATE), - 'manifest': package.manifest.unmarshal(), - 'multi_instance': False, - } - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.timer') - template = os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE) - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - - if package.manifest['service']['asic-service']: - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.timer') - template_vars['multi_instance'] = True - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - def update_generated_services_conf_file(self, package: Package, remove=False): """ Updates generated_services.conf file. diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 43b6c309fe..32a155206c 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -105,8 +105,7 @@ def update(self, old_manifest: Manifest, new_manifest: Manifest): """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "delayed" for example if - the new feature introduces a service timer or name of the service has + feature entries have to be updated. e.g: name of the service has changed, but user configurable entries are not changed). 
Args: diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py index 98db887941..3d6beae9ff 100644 --- a/tests/sonic_package_manager/conftest.py +++ b/tests/sonic_package_manager/conftest.py @@ -412,7 +412,6 @@ def sonic_fs(fs): fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION) fs.create_file(GENERATED_SERVICES_CONF_FILE) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_FILE_TEMPLATE)) - fs.create_file(os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_MGMT_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DOCKER_CTL_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)) diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index 8278a8da2b..319dcf32ff 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -137,20 +137,6 @@ def read_file(name): assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) -def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): - entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - manifest['service']['delayed'] = True - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - def test_service_creator_with_debug_dump(sonic_fs, manifest, service_creator): entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) @@ -396,27 +382,6 @@ def test_feature_update(mock_sonic_db, manifest): ], any_order=True) -def test_feature_registration_with_timer(mock_sonic_db, manifest): - manifest['service']['delayed'] = True - mock_connector = Mock() - 
mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) - mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector) - feature_registry = FeatureRegistry(mock_sonic_db) - feature_registry.register(manifest) - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'delayed': 'True', - 'check_up_status': 'False', - 'support_syslog_rate_limit': 'False', - }) - - def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): mock_connector = Mock() mock_connector.get_entry = Mock(return_value={}) From c6637553fdd1d2fe7d1318383d2e68aa5cd46849 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Mon, 16 Sep 2024 02:57:02 +0300 Subject: [PATCH 10/13] Move from bootctl to mokutil when checking for Secure Boot status (#3486) #### What I did Moved to use mokutil instead of bootctl as bootctl is no longer available in Bookworm. This affected reboot scripts, and upgrade scenario. 
#### How I did it Change calls to _bootctl status_ with _mokutil --sb-state_ #### How to verify it After fixing the scripts to check reboot: root@sn5600:/home/admin# soft-reboot SECURE_UPGRADE_ENABLED=1 [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]] load_kernel_secure invoke_kexec -s packet_write_wait: port 22: Broken pipe admin@sn5600:~$ show reboot-cause User issued 'soft-reboot' command [User: admin, Time: Tue Jul 23 11:06:43 PM UTC 2024] --- scripts/fast-reboot | 2 +- scripts/soft-reboot | 21 ++++++++++++++++++--- sonic_installer/bootloader/grub.py | 2 +- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index e183c34219..09f8f444ab 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -673,7 +673,7 @@ if is_secureboot && grep -q aboot_machine= /host/machine.conf; then else # check if secure boot is enable in UEFI CHECK_SECURE_UPGRADE_ENABLED=0 - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? 
if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then debug "Loading kernel without secure boot" load_kernel diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 0b9030a6f7..74d7051b1d 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -93,7 +93,7 @@ function clear_lingering_reboot_config() if [[ -f ${WARM_DIR}/${REDIS_FILE} ]]; then mv -f ${WARM_DIR}/${REDIS_FILE} ${WARM_DIR}/${REDIS_FILE}.${TIMESTAMP} || /bin/true fi - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true } SCRIPT=$0 @@ -147,9 +147,17 @@ function setup_reboot_variables() fi } +function invoke_kexec() { + /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" $@ +} + function load_kernel() { # Load kernel into the memory - /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" + invoke_kexec -a +} + +function load_kernel_secure() { + invoke_kexec -s } function reboot_pre_check() @@ -215,7 +223,14 @@ stop_sonic_services clear_lingering_reboot_config -load_kernel +# check if secure boot is enabled +CHECK_SECURE_UPGRADE_ENABLED=0 +SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? +if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then + load_kernel +else + load_kernel_secure +fi # Update the reboot cause file to reflect that user issued 'reboot' command # Upon next boot, the contents of this file will be used to determine the diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index d76ddcc0c7..029ebf34f1 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -164,7 +164,7 @@ def is_secure_upgrade_image_verification_supported(self): if ! 
[ -n "$(ls -A /sys/firmware/efi/efivars 2>/dev/null)" ]; then mount -t efivarfs none /sys/firmware/efi/efivars 2>/dev/null fi - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") else echo "efi not supported - exiting without verification" exit 1 From ed624895f218b5c81fabc0c4655b317374e246d7 Mon Sep 17 00:00:00 2001 From: i-davydenko <41341620+i-davydenko@users.noreply.github.com> Date: Mon, 16 Sep 2024 03:57:20 +0300 Subject: [PATCH 11/13] SONIC CLI for CLI-Sessions feature (#3175) HLD: https://github.com/sonic-net/SONiC/pull/1367 | Module name | PR | state | context | | ------------- | ------------- | ----|-----| | [sonic-buildimage](https://github.com/sonic-net/sonic-buildimage) | [Dev cli sessions](https://github.com/sonic-net/sonic-buildimage/pull/17623) | ![GitHub issue/pull request detail](https://img.shields.io/github/pulls/detail/state/sonic-net/sonic-buildimage/17623) | ![GitHub pull request check contexts](https://img.shields.io/github/status/contexts/pulls/sonic-net/sonic-buildimage/17623) | | [sonic-host-services](https://github.com/sonic-net/sonic-host-services) | [cli-sessions](https://github.com/sonic-net/sonic-host-services/pull/99) | ![GitHub issue/pull request detail](https://img.shields.io/github/pulls/detail/state/sonic-net/sonic-host-services/99) | ![GitHub pull request check contexts](https://img.shields.io/github/status/contexts/pulls/sonic-net/sonic-host-services/99) | | [sonic-utilities](https://github.com/sonic-net/sonic-utilities) | [SONIC CLI for CLI-Sessions feature #3175](https://github.com/sonic-net/sonic-utilities/pull/3175) | ![GitHub issue/pull request detail](https://img.shields.io/github/pulls/detail/state/sonic-net/sonic-utilities/3175) | ![GitHub pull request check contexts](https://img.shields.io/github/status/contexts/pulls/sonic-net/sonic-utilities/3175) | #### What I did Implement next commands for CLI-sessions 
feature: - config serial-console inactivity-timeout - config serial-console sysrq-capabilities - show serial-console - config ssh max-sessions - config ssh inactivity-timeout - show ssh #### How I did it Write handlers in config/main.py for serial-console and ssh commands to cover configuration set; Write handlers in show/main.py for serial-console and ssh to cover show commands. #### How to verify it Manual tests --- config/main.py | 66 ++++++++++++++++++++++++++++++++++++++ show/main.py | 40 +++++++++++++++++++++++ tests/cli_sessions_test.py | 32 ++++++++++++++++++ 3 files changed, 138 insertions(+) create mode 100644 tests/cli_sessions_test.py diff --git a/config/main.py b/config/main.py index f4ea93e53f..bfa6dccadc 100644 --- a/config/main.py +++ b/config/main.py @@ -7987,5 +7987,71 @@ def notice(db, category_list, max_events, namespace): handle_asic_sdk_health_suppress(db, 'notice', category_list, max_events, namespace) +# +# 'serial_console' group ('config serial_console') +# +@config.group(cls=clicommon.AbbreviationGroup, name='serial_console') +def serial_console(): + """Configuring system serial-console behavior""" + pass + + +@serial_console.command('sysrq-capabilities') +@click.argument('sysrq_capabilities', metavar='', required=True, + type=click.Choice(['enabled', 'disabled'])) +def sysrq_capabilities(sysrq_capabilities): + """Set serial console sysrq-capabilities state""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'sysrq_capabilities': sysrq_capabilities}) + + +@serial_console.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_serial(inactivity_timeout): + """Set serial console inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +# +# 'ssh' group ('config 
ssh') +# +@config.group(cls=clicommon.AbbreviationGroup, name='ssh') +def ssh(): + """Configuring system ssh behavior""" + pass + + +@ssh.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_ssh(inactivity_timeout): + """Set ssh inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +@ssh.command('max-sessions') +@click.argument('max-sessions', metavar='', required=True, + type=click.IntRange(0, 100)) +def max_sessions(max_sessions): + """Set max number of concurrent logins""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'max_sessions': max_sessions}) + + if __name__ == '__main__': config() diff --git a/show/main.py b/show/main.py index 25202e1e42..5257b975db 100755 --- a/show/main.py +++ b/show/main.py @@ -2433,6 +2433,46 @@ def received(db, namespace): ctx.fail("ASIC/SDK health event is not supported on the platform") +# +# 'serial_console' command group ("show serial_console ...") +# +@cli.group('serial_console', invoke_without_command=True) +@clicommon.pass_db +def serial_console(db): + """Show serial_console configuration""" + + serial_console_table = db.cfgdb.get_entry('SERIAL_CONSOLE', 'POLICIES') + + hdrs = ['inactivity-timeout', 'sysrq-capabilities'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('sysrq_capabilities', 'disabled ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + +# +# 'ssh' command group ("show ssh ...") +# +@cli.group('ssh', invoke_without_command=True) +@clicommon.pass_db +def ssh(db): + """Show ssh configuration""" + + serial_console_table = db.cfgdb.get_entry('SSH_SERVER', 'POLICIES') + + hdrs = 
['inactivity-timeout', 'max-sessions'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('max_session', '0 ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/tests/cli_sessions_test.py b/tests/cli_sessions_test.py new file mode 100644 index 0000000000..755b232708 --- /dev/null +++ b/tests/cli_sessions_test.py @@ -0,0 +1,32 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + +class TestCliSessionsCommands: + def test_config_command(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['serial_console'].commands['sysrq-capabilities'], + ['enabled'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['serial_console'].commands['inactivity-timeout'], + ['180'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['serial_console'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['inactivity-timeout'], ['190'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['max-sessions'], ['60'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['ssh'], obj=db) + assert result.exit_code == 0 From b4d27c4bbf528f6cc026d37254d820284192951d Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Tue, 17 Sep 2024 09:31:30 -0700 Subject: [PATCH 12/13] Fix multi-asic behaviour for watermarkstat (#3060) * Adds multi-asic support to watermarkstat, fixing watermark/persistent-watermark related commands. 
Previously, the following commands were not behaving correctly on multi-asic devices, as the '-n' namespace option was not available, and correct namespaces were not traversed on multi-asic devices. * show buffer_pool watermark/persistent-watermark * show headroom-pool watermark/persistent-watermark * show priority-group persistent-watermark/watermark * show queue persistent-watermark/watermark This change fixes multi-asic behaviour of CLI commands that rely on watermarkstat, as listed above. --- clear/main.py | 158 +++++- scripts/watermarkstat | 132 ++--- show/main.py | 173 ++++++- tests/mock_tables/asic0/counters_db.json | 313 +++++++++++- tests/mock_tables/asic1/counters_db.json | 413 ++++++++++++++++ tests/mock_tables/dbconnector.py | 28 +- tests/multi_asic_pgdropstat_test.py | 16 +- tests/multi_asic_queue_counter_test.py | 32 +- tests/multi_asic_watermarkstat_test.py | 145 ++++++ tests/watermarkstat_test.py | 10 +- tests/wm_input/wm_test_vectors.py | 602 +++++++++++++++++++++-- 11 files changed, 1853 insertions(+), 169 deletions(-) create mode 100644 tests/multi_asic_watermarkstat_test.py diff --git a/clear/main.py b/clear/main.py index 5ffcd2dba4..38dca2737f 100755 --- a/clear/main.py +++ b/clear/main.py @@ -229,16 +229,38 @@ def watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) @watermark.command('headroom') -def clear_wm_pg_headroom(): +def clear_wm_pg_headroom(namespace): """Clear user headroom WM for pg""" command = ['watermarkstat', '-c', '-t', 'pg_headroom'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('shared') -def clear_wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace 
name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_pg_shared(namespace): """Clear user shared WM for pg""" command = ['watermarkstat', '-c', '-t', 'pg_shared'] + if namespace: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -261,16 +283,38 @@ def persistent_watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @persistent_watermark.command('headroom') -def clear_pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_pg_headroom(namespace): """Clear persistent headroom WM for pg""" command = ['watermarkstat', '-c', '-p', '-t', 'pg_headroom'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def clear_pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_pg_shared(namespace): """Clear persistent shared WM for pg""" command = ['watermarkstat', '-c', '-p', '-t', 'pg_shared'] + if namespace: + command += ['-n', str(namespace)] run_command(command) @@ -285,69 +329,159 @@ def watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @watermark.command('unicast') -def clear_wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_uni(namespace): """Clear user WM for unicast queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_uni'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + 
@watermark.command('multicast') -def clear_wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_multi(namespace): """Clear user WM for multicast queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_multi'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('all') -def clear_wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_all(namespace): """Clear user WM for all queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_all'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @queue.group(name='persistent-watermark') def persistent_watermark(): """Clear queue persistent WM. 
One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @persistent_watermark.command('unicast') -def clear_pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_uni(namespace): """Clear persistent WM for persistent queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_uni'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('multicast') -def clear_pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_multi(namespace): """Clear persistent WM for multicast queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_multi'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('all') -def clear_pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_all(namespace): """Clear persistent WM for all queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_all'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @cli.group(name='headroom-pool') def headroom_pool(): """Clear headroom pool WM""" pass + @headroom_pool.command('watermark') -def watermark(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def watermark(namespace): """Clear headroom pool user WM. 
One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") command = ['watermarkstat', '-c', '-t', 'headroom_pool'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def persistent_watermark(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def persistent_watermark(namespace): """Clear headroom pool persistent WM. One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") command = ['watermarkstat', '-c', '-p', '-t', 'headroom_pool'] + if namespace: + command += ['-n', str(namespace)] run_command(command) # diff --git a/scripts/watermarkstat b/scripts/watermarkstat index 99a46d5484..70ea853bc4 100755 --- a/scripts/watermarkstat +++ b/scripts/watermarkstat @@ -5,14 +5,15 @@ # watermarkstat is a tool for displaying watermarks. 
# ##################################################################### - -import argparse +import click import json import os import sys from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -23,6 +24,10 @@ try: sys.path.insert(0, tests_path) from mock_tables import dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + if os.environ["WATERMARKSTAT_UNIT_TESTING"] == "1": input_path = os.path.join(tests_path, "wm_input") mock_db_path = os.path.join(input_path, "mock_db") @@ -66,18 +71,33 @@ COUNTERS_PG_INDEX_MAP = "COUNTERS_PG_INDEX_MAP" COUNTERS_BUFFER_POOL_NAME_MAP = "COUNTERS_BUFFER_POOL_NAME_MAP" -class Watermarkstat(object): +class WatermarkstatWrapper(object): + """A wrapper to execute Watermarkstat over the correct namespaces""" + def __init__(self, namespace): + self.namespace = namespace - def __init__(self): - self.counters_db = SonicV2Connector(use_unix_socket_path=False) - self.counters_db.connect(self.counters_db.COUNTERS_DB) + # Initialize the multi_asic object + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + + @multi_asic_util.run_on_multi_asic + def run(self, clear, persistent, wm_type): + watermarkstat = Watermarkstat(self.db, self.multi_asic.current_namespace) + if clear: + watermarkstat.send_clear_notification(("PERSISTENT" if persistent else "USER", wm_type.upper())) + else: + table_prefix = PERSISTENT_TABLE_PREFIX if persistent else USER_TABLE_PREFIX + watermarkstat.print_all_stat(table_prefix, wm_type) - # connect APP DB for clear notifications - self.app_db = SonicV2Connector(use_unix_socket_path=False) - self.app_db.connect(self.counters_db.APPL_DB) + +class Watermarkstat(object): + + def __init__(self, db, namespace): + self.namespace = 
namespace + self.db = db def get_queue_type(table_id): - queue_type = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) + queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: print("Queue Type is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -92,7 +112,7 @@ class Watermarkstat(object): sys.exit(1) def get_queue_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -100,7 +120,7 @@ class Watermarkstat(object): return port_table_id def get_pg_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -108,7 +128,7 @@ class Watermarkstat(object): return port_table_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: print("COUNTERS_PORT_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -127,7 +147,7 @@ class Watermarkstat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get Queues for each port - counter_queue_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: print("COUNTERS_QUEUE_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ 
-144,7 +164,7 @@ class Watermarkstat(object): self.port_all_queues_map[port][queue] = counter_queue_name_map[queue] # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) if counter_pg_name_map is None: print("COUNTERS_PG_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -154,7 +174,7 @@ class Watermarkstat(object): self.port_pg_map[port][pg] = counter_pg_name_map[pg] # Get all buffer pools - self.buffer_pool_name_to_oid_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) + self.buffer_pool_name_to_oid_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) if self.buffer_pool_name_to_oid_map is None: print("COUNTERS_BUFFER_POOL_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -194,7 +214,7 @@ class Watermarkstat(object): } def get_queue_index(self, table_id): - queue_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) + queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: print("Queue index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -202,7 +222,7 @@ class Watermarkstat(object): return queue_index def get_pg_index(self, table_id): - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) + pg_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) if pg_index is None: print("Priority group index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -256,7 +276,7 @@ class Watermarkstat(object): full_table_id = table_prefix + obj_id idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, watermark) + counter_data = self.db.get(self.db.COUNTERS_DB, 
full_table_id, watermark) if counter_data is None or counter_data == '': fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -274,7 +294,7 @@ class Watermarkstat(object): continue db_key = table_prefix + bp_oid - data = self.counters_db.get(self.counters_db.COUNTERS_DB, db_key, type["wm_name"]) + data = self.db.get(self.db.COUNTERS_DB, db_key, type["wm_name"]) if data is None: data = STATUS_NA table.append((buf_pool, data)) @@ -283,58 +303,52 @@ class Watermarkstat(object): # Get stat for each port for port in natsorted(self.counter_port_name_map): row_data = list() + data = self.get_counters(table_prefix, type["obj_map"][port], type["idx_func"], type["wm_name"]) row_data.append(port) row_data.extend(data) table.append(tuple(row_data)) - print(type["message"]) + namespace_str = f" (Namespace {self.namespace})" if multi_asic.is_multi_asic() else '' + print(type["message"] + namespace_str) print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def send_clear_notification(self, data): msg = json.dumps(data, separators=(',', ':')) - self.app_db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) + self.db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) return - -def main(): - - parser = argparse.ArgumentParser(description='Display the watermark counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - watermarkstat -t pg_headroom - watermarkstat -t pg_shared - watermarkstat -t q_shared_all - watermarkstat -p -t q_shared_all - watermarkstat -t q_shared_all -c - watermarkstat -t q_shared_uni -c - watermarkstat -t q_shared_multi -c - watermarkstat -p -t pg_shared - watermarkstat -p -t q_shared_multi -c - watermarkstat -t buffer_pool - watermarkstat -t buffer_pool -c - watermarkstat -p -t buffer_pool -c -""") - - parser.add_argument('-c', '--clear', action='store_true', help='Clear watermarks request') - parser.add_argument('-p', '--persistent', action='store_true', help='Do the operations on the persistent 
watermark') - parser.add_argument('-t', '--type', required=True, action='store', - choices=['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all'], - help='The type of watermark') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - args = parser.parse_args() - watermarkstat = Watermarkstat() - - if args.clear: - watermarkstat.send_clear_notification(("PERSISTENT" if args.persistent else "USER", args.type.upper())) - sys.exit(0) - - table_prefix = PERSISTENT_TABLE_PREFIX if args.persistent else USER_TABLE_PREFIX - watermarkstat.print_all_stat(table_prefix, args.type) +@click.command() +@click.option('-c', '--clear', is_flag=True, help='Clear watermarks request') +@click.option('-p', '--persistent', is_flag=True, help='Do the operations on the persistent watermark') +@click.option('-t', '--type', 'wm_type', type=click.Choice(['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all']), help='The type of watermark', required=True) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(clear, persistent, wm_type, namespace): + """ + Display the watermark counters + + Examples: + watermarkstat -t pg_headroom + watermarkstat -t pg_shared + watermarkstat -t q_shared_all + watermarkstat -p -t q_shared_all + watermarkstat -t q_shared_all -c + watermarkstat -t q_shared_uni -c + watermarkstat -t q_shared_multi -c + watermarkstat -p -t pg_shared + watermarkstat -p -t q_shared_multi -c + watermarkstat -t buffer_pool + watermarkstat -t buffer_pool -c + watermarkstat -p -t buffer_pool -c + watermarkstat -t pg_headroom -n asic0 + watermarkstat -p -t buffer_pool -c -n asic1 + """ + + namespace_context = WatermarkstatWrapper(namespace) + namespace_context.run(clear, persistent, wm_type) sys.exit(0) - if __name__
== "__main__": main() diff --git a/show/main.py b/show/main.py index 5257b975db..b7e75b24cf 100755 --- a/show/main.py +++ b/show/main.py @@ -783,23 +783,53 @@ def watermark(): # 'unicast' subcommand ("show queue watermarks unicast") @watermark.command('unicast') -def wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_uni(namespace): """Show user WM for unicast queues""" command = ['watermarkstat', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue watermarks multicast") @watermark.command('multicast') -def wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_multi(namespace): """Show user WM for multicast queues""" command = ['watermarkstat', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue watermarks all") @watermark.command('all') -def wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_all(namespace): """Show user WM for all queues""" command = ['watermarkstat', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -813,23 +843,53 @@ def persistent_watermark(): # 'unicast' subcommand ("show queue persistent-watermarks unicast") @persistent_watermark.command('unicast') -def pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or 
all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_uni(namespace): """Show persistent WM for unicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue persistent-watermarks multicast") @persistent_watermark.command('multicast') -def pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_multi(namespace): """Show persistent WM for multicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue persistent-watermarks all") @persistent_watermark.command('all') -def pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_all(namespace): """Show persistent WM for all queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -846,15 +906,35 @@ def watermark(): pass @watermark.command('headroom') -def wm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_headroom(namespace): """Show user headroom WM for pg""" command = ['watermarkstat', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @watermark.command('shared') -def wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + 
type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_shared(namespace): """Show user shared WM for pg""" command = ['watermarkstat', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -877,15 +957,36 @@ def persistent_watermark(): pass @persistent_watermark.command('headroom') -def pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_headroom(namespace): """Show persistent headroom WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_shared(namespace): """Show persistent shared WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -898,15 +999,36 @@ def buffer_pool(): """Show details of the buffer pools""" @buffer_pool.command('watermark') -def wm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_buffer_pool(namespace): """Show user WM for buffer pools""" - command = ['watermarkstat', '-t' ,'buffer_pool'] + command = ['watermarkstat', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + 
@buffer_pool.command('persistent-watermark') -def pwm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_buffer_pool(namespace): """Show persistent WM for buffer pools""" command = ['watermarkstat', '-p', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -919,15 +1041,36 @@ def headroom_pool(): """Show details of headroom pool""" @headroom_pool.command('watermark') -def wm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_headroom_pool(namespace): """Show user WM for headroom pool""" command = ['watermarkstat', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def pwm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_headroom_pool(namespace): """Show persistent WM for headroom pool""" command = ['watermarkstat', '-p', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 53e3b558a2..610662a019 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -2202,14 +2202,14 @@ "oid:0x1000000004005": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004006": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004007": "SAI_QUEUE_TYPE_UNICAST", - "oid:0x1000000004008": "SAI_QUEUE_TYPE_MULTICAST", - 
"oid:0x1000000004009": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004010": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004011": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004012": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004013": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004014": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004015": "SAI_QUEUE_TYPE_MULTICAST" + "oid:0x1000000004008": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004009": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004010": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004011": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004012": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004013": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004014": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004015": "SAI_QUEUE_TYPE_ALL" }, "COUNTERS_FABRIC_PORT_NAME_MAP" : { "PORT0": "oid:0x1000000000143", @@ -2489,5 +2489,302 @@ "COUNTERS:oid:0x1600000000034d":{ "SAI_COUNTER_STAT_PACKETS": 200, "SAI_COUNTER_STAT_BYTES": 4000 + }, + "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_PG_NAME_MAP": { + "Enternet0:0": "oid:100000000b0f0", + "Enternet0:1": "oid:100000000b0f1", + "Enternet0:2": "oid:100000000b0f2", + "Enternet0:3": "oid:100000000b0f3", + "Enternet0:4": "oid:100000000b0f4", + "Enternet0:5": "oid:100000000b0f5", + "Enternet0:6": "oid:100000000b0f6", + "Enternet0:7": "oid:100000000b0f7", + "Enternet0:8": "oid:100000000b0f8", + "Enternet0:9": "oid:100000000b0f9", + "Enternet0:10": "oid:100000000b0fa", + "Enternet0:11": "oid:100000000b0fb", + "Enternet0:12": "oid:100000000b0fc", + "Enternet0:13": "oid:100000000b0fd", + "Enternet0:14": "oid:100000000b0fe", + "Enternet0:15": "oid:100000000b0ff", + "Enternet4:0": "oid:0x100000000b1f0", + "Enternet4:1": "oid:0x100000000b1f1", + "Enternet4:2": "oid:0x100000000b1f2", + "Enternet4:3": "oid:0x100000000b1f3", + "Enternet4:4": "oid:0x100000000b1f4", + "Enternet4:5": "oid:0x100000000b1f5", + "Enternet4:6": "oid:0x100000000b1f6", + "Enternet4:7": "oid:0x100000000b1f7", + 
"Enternet4:8": "oid:0x100000000b1f8", + "Enternet4:9": "oid:0x100000000b1f9", + "Enternet4:10": "oid:0x100000000b1fa", + "Enternet4:11": "oid:0x100000000b1fb", + "Enternet4:12": "oid:0x100000000b1fc", + "Enternet4:13": "oid:0x100000000b1fd", + "Enternet4:14": "oid:0x100000000b1fe", + "Enternet4:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000002", + "oid:100000000b0f1": "oid:0x1000000000002", + "oid:100000000b0f2": "oid:0x1000000000002", + "oid:100000000b0f3": "oid:0x1000000000002", + "oid:100000000b0f4": "oid:0x1000000000002", + "oid:100000000b0f5": "oid:0x1000000000002", + "oid:100000000b0f6": "oid:0x1000000000002", + "oid:100000000b0f7": "oid:0x1000000000002", + "oid:100000000b0f8": "oid:0x1000000000002", + "oid:100000000b0f9": "oid:0x1000000000002", + "oid:100000000b0fa": "oid:0x1000000000002", + "oid:100000000b0fb": "oid:0x1000000000002", + "oid:100000000b0fc": "oid:0x1000000000002", + "oid:100000000b0fd": "oid:0x1000000000002", + "oid:100000000b0fe": "oid:0x1000000000002", + "oid:100000000b0ff": "oid:0x1000000000002", + "oid:0x100000000b1f0": "oid:0x1000000000004", + "oid:0x100000000b1f1": "oid:0x1000000000004", + "oid:0x100000000b1f2": "oid:0x1000000000004", + "oid:0x100000000b1f3": "oid:0x1000000000004", + "oid:0x100000000b1f4": "oid:0x1000000000004", + "oid:0x100000000b1f5": "oid:0x1000000000004", + "oid:0x100000000b1f6": "oid:0x1000000000004", + "oid:0x100000000b1f7": "oid:0x1000000000004", + "oid:0x100000000b1f8": "oid:0x1000000000004", + "oid:0x100000000b1f9": "oid:0x1000000000004", + "oid:0x100000000b1fa": "oid:0x1000000000004", + "oid:0x100000000b1fb": "oid:0x1000000000004", + "oid:0x100000000b1fc": "oid:0x1000000000004", + "oid:0x100000000b1fd": "oid:0x1000000000004", + "oid:0x100000000b1fe": "oid:0x1000000000004", + "oid:0x100000000b1ff" : "oid:0x1000000000004" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + 
"oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + }, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + "USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + "USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + 
"PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index f919742157..1455f069c0 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -309,6 +309,111 @@ "oid:0x100000000b1fe": "14", "oid:0x100000000b1ff" : "15" }, 
+ "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_QUEUE_PORT_MAP": { + "oid:0x100000000b100": "oid:0x1000000000b06", + "oid:0x100000000b101": "oid:0x1000000000b06", + "oid:0x100000000b102": "oid:0x1000000000b06", + "oid:0x100000000b103": "oid:0x1000000000b06", + "oid:0x100000000b104": "oid:0x1000000000b06", + "oid:0x100000000b105": "oid:0x1000000000b06", + "oid:0x100000000b106": "oid:0x1000000000b06", + "oid:0x100000000b107": "oid:0x1000000000b06", + "oid:0x100000000b108": "oid:0x1000000000b06", + "oid:0x100000000b109": "oid:0x1000000000b06", + "oid:0x100000000b110": "oid:0x1000000000b06", + "oid:0x100000000b111": "oid:0x1000000000b06", + "oid:0x100000000b112": "oid:0x1000000000b06", + "oid:0x100000000b113": "oid:0x1000000000b06", + "oid:0x100000000b114": "oid:0x1000000000b06", + "oid:0x100000000b115": "oid:0x1000000000b06", + "oid:0x100000000b200": "oid:0x1000000000b08", + "oid:0x100000000b201": "oid:0x1000000000b08", + "oid:0x100000000b202": "oid:0x1000000000b08", + "oid:0x100000000b203": "oid:0x1000000000b08", + "oid:0x100000000b204": "oid:0x1000000000b08", + "oid:0x100000000b205": "oid:0x1000000000b08", + "oid:0x100000000b206": "oid:0x1000000000b08", + "oid:0x100000000b207": "oid:0x1000000000b08", + "oid:0x100000000b208": "oid:0x1000000000b08", + "oid:0x100000000b209": "oid:0x1000000000b08", + "oid:0x100000000b210": "oid:0x1000000000b08", + "oid:0x100000000b211": "oid:0x1000000000b08", + "oid:0x100000000b212": "oid:0x1000000000b08", + "oid:0x100000000b213": "oid:0x1000000000b08", + "oid:0x100000000b214": "oid:0x1000000000b08", + "oid:0x100000000b215": "oid:0x1000000000b08" + }, + "COUNTERS_QUEUE_TYPE_MAP": { + "oid:0x100000000b100": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b101": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b102": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b103": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b104": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b105": "SAI_QUEUE_TYPE_UNICAST", 
+ "oid:0x100000000b106": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b107": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b108": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b109": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b110": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b111": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b112": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b113": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b114": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b115": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b200": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b201": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b202": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b203": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b204": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b205": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b206": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b207": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b208": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b209": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b210": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b211": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b212": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b213": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b214": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b215": "SAI_QUEUE_TYPE_ALL" + }, + "COUNTERS_QUEUE_INDEX_MAP": { + "oid:0x100000000b100": "0", + "oid:0x100000000b101": "1", + "oid:0x100000000b102": "2", + "oid:0x100000000b103": "3", + "oid:0x100000000b104": "4", + "oid:0x100000000b105": "5", + "oid:0x100000000b106": "6", + "oid:0x100000000b107": "7", + "oid:0x100000000b108": "8", + "oid:0x100000000b109": "9", + "oid:0x100000000b110": "10", + "oid:0x100000000b111": "11", + "oid:0x100000000b112": "12", + "oid:0x100000000b113": "13", + "oid:0x100000000b114": "14", + "oid:0x100000000b115": "15", + "oid:0x100000000b200": "0", + "oid:0x100000000b201": "1", + "oid:0x100000000b202": "2", + "oid:0x100000000b203": "3", + "oid:0x100000000b204": "4", + 
"oid:0x100000000b205": "5", + "oid:0x100000000b206": "6", + "oid:0x100000000b207": "7", + "oid:0x100000000b208": "8", + "oid:0x100000000b209": "9", + "oid:0x100000000b210": "10", + "oid:0x100000000b211": "11", + "oid:0x100000000b212": "12", + "oid:0x100000000b213": "13", + "oid:0x100000000b214": "14", + "oid:0x100000000b215": "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", @@ -1262,5 +1367,313 @@ "COUNTERS:oid:0x1600000000034f":{ "SAI_COUNTER_STAT_PACKETS": 1000, "SAI_COUNTER_STAT_BYTES": 2000 + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + 
}, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + 
"USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + 
"USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "USER_WATERMARKS:oid:0x18000000000c10": { + "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES": "3000", + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES": "432640" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f2": { 
+ "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "USER_WATERMARKS:oid:0x100000000b100": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b101": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "USER_WATERMARKS:oid:0x100000000b102": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b103": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b104": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b105": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b106": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b107": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "28" + }, + "USER_WATERMARKS:oid:0x100000000b108": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b109": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b110": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "5" + }, + "USER_WATERMARKS:oid:0x100000000b111": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b112": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "208" + }, + "USER_WATERMARKS:oid:0x100000000b113": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + 
"USER_WATERMARKS:oid:0x100000000b114": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "228" + }, + "USER_WATERMARKS:oid:0x100000000b115": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" } } diff --git a/tests/mock_tables/dbconnector.py b/tests/mock_tables/dbconnector.py index 4ccb392368..379c4e75cd 100644 --- a/tests/mock_tables/dbconnector.py +++ b/tests/mock_tables/dbconnector.py @@ -68,6 +68,32 @@ def config_set(self, *args): class MockPubSub: + class MessageList: + """A custom subscriptable class to hold messages in a list-like format""" + def __init__(self, channel): + self._data = [] + self._channel = channel + + def __getitem__(self, index): + return self._data[index] + + def __setitem__(self, index, value): + self._data[index] = value + + def append(self, msg): + print(f"Message published to {self._channel}: ", msg) + self._data.append(msg) + + def __init__(self, namespace): + # Initialize channels required for testing + self.messages = self.MessageList('WATERMARK_CLEAR_REQUEST') + self.channels = {'WATERMARK_CLEAR_REQUEST': self.messages} + self.namespace = namespace + + def __getitem__(self, key): + print("Channel:", key, "accessed in namespace:", self.namespace) + return self.channels[key] + def get_message(self): return None @@ -99,7 +125,7 @@ def __init__(self, *args, **kwargs): db_name = kwargs.pop('db_name') self.decode_responses = kwargs.pop('decode_responses', False) == True fname = db_name.lower() + ".json" - self.pubsub = MockPubSub() + self.pubsub = MockPubSub(namespace) if namespace is not None and namespace is not multi_asic.DEFAULT_NAMESPACE: fname = os.path.join(INPUT_DIR, namespace, fname) diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py index 94bb13011b..2a5e97cfdb 100644 --- a/tests/multi_asic_pgdropstat_test.py +++ b/tests/multi_asic_pgdropstat_test.py @@ -27,18 +27,18 @@ PG14 PG15 -------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ ------ ------ - 
Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ - 0 0 - Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ - 0 0 + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ 0 0 Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ 0 0 -Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ - N/A N/A -Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ - N/A N/A +Ethernet-BP256 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP260 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 """ diff --git a/tests/multi_asic_queue_counter_test.py b/tests/multi_asic_queue_counter_test.py index fe8b057b5d..992709b3ae 100644 --- a/tests/multi_asic_queue_counter_test.py +++ b/tests/multi_asic_queue_counter_test.py @@ -89,14 +89,14 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ @@ -112,14 +112,14 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 
+Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ diff --git a/tests/multi_asic_watermarkstat_test.py b/tests/multi_asic_watermarkstat_test.py new file mode 100644 index 0000000000..b3bc011011 --- /dev/null +++ b/tests/multi_asic_watermarkstat_test.py @@ -0,0 +1,145 @@ +import os +import sys +from .wm_input.wm_test_vectors import testData +from .utils import get_result_and_return_code +from click.testing import CliRunner +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestWatermarkstatMultiAsic(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def executor(self, testcase): + runner = CliRunner() + for input in testcase: + if 'clear' in input['cmd']: + exec_cmd = input['cmd'][1:] + print(exec_cmd) + exit_code, output = get_result_and_return_code(exec_cmd) + else: + if len(input['cmd']) == 3: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]].commands[input['cmd'][2]] + else: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] + args = [] if 'args' not in input else input['args'] + result = runner.invoke(exec_cmd, args) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + expected_code = 0 if 'rc' not in input else input['rc'] + assert exit_code == expected_code + assert output == input['rc_output'] + + def test_show_pg_shared_one_masic(self): + self.executor(testData['show_pg_wm_shared_one_masic']) + + def test_show_pg_shared_all_masic(self): + self.executor(testData['show_pg_wm_shared_all_masic']) + + def test_show_pg_headroom_wm_one_masic(self): + 
self.executor(testData['show_pg_wm_hdrm_one_masic']) + + def test_show_pg_headroom_wm_all_masic(self): + self.executor(testData['show_pg_wm_hdrm_all_masic']) + + def test_show_pg_shared_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_shared_one_masic']) + + def test_show_pg_shared_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_shared_all_masic']) + + def test_show_pg_headroom_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_hdrm_one_masic']) + + def test_show_pg_headroom_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_hdrm_all_masic']) + + def test_show_queue_unicast_wm_one_masic(self): + self.executor(testData['show_q_wm_unicast_one_masic']) + + def test_show_queue_unicast_wm_all_masic(self): + self.executor(testData['show_q_wm_unicast_all_masic']) + + def test_show_queue_unicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_unicast_one_masic']) + + def test_show_queue_unicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_unicast_all_masic']) + + def test_show_queue_multicast_wm_one_masic(self): + self.executor(testData['show_q_wm_multicast_one_masic']) + + def test_show_queue_multicast_wm_all_masic(self): + self.executor(testData['show_q_wm_multicast_all_masic']) + + def test_show_queue_multicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_multicast_one_masic']) + + def test_show_queue_multicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_multicast_all_masic']) + + def test_show_queue_all_wm_one_masic(self): + self.executor(testData['show_q_wm_all_one_masic']) + + def test_show_queue_all_wm_all_masic(self): + self.executor(testData['show_q_wm_all_all_masic']) + + def test_show_queue_all_pwm_one_masic(self): + self.executor(testData['show_q_pwm_all_one_masic']) + + def test_show_queue_all_pwm_all_masic(self): + self.executor(testData['show_q_pwm_all_all_masic']) + + def test_show_buffer_pool_wm_one_masic(self): + self.executor(testData['show_buffer_pool_wm_one_masic']) 
+ + def test_show_buffer_pool_wm_all_masic(self): + self.executor(testData['show_buffer_pool_wm_all_masic']) + + def test_show_buffer_pool_pwm_one_masic(self): + self.executor(testData['show_buffer_pool_pwm_one_masic']) + + def test_show_buffer_pool_pwm_all_masic(self): + self.executor(testData['show_buffer_pool_pwm_all_masic']) + + def test_show_headroom_pool_wm_one_masic(self): + self.executor(testData['show_hdrm_pool_wm_one_masic']) + + def test_show_headroom_pool_wm_all_masic(self): + self.executor(testData['show_hdrm_pool_wm_all_masic']) + + def test_show_headroom_pool_pwm_one_masic(self): + self.executor(testData['show_hdrm_pool_pwm_one_masic']) + + def test_show_headroom_pool_pwm_all_masic(self): + self.executor(testData['show_hdrm_pool_pwm_all_masic']) + + def test_show_invalid_namespace_masic(self): + self.executor(testData['show_invalid_namespace_masic']) + + def test_clear_headroom_one_masic(self): + self.executor(testData['clear_hdrm_pool_wm_one_masic']) + + def test_clear_headroom_all_masic(self): + self.executor(testData['clear_hdrm_pool_wm_all_masic']) + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + print("TEARDOWN") diff --git a/tests/watermarkstat_test.py b/tests/watermarkstat_test.py index dc419ae3b9..6a2ebfa2cf 100644 --- a/tests/watermarkstat_test.py +++ b/tests/watermarkstat_test.py @@ -1,11 +1,9 @@ import os import sys import pytest - import show.main as show from click.testing import CliRunner - -from .wm_input.wm_test_vectors import * +from wm_input.wm_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -84,12 +82,14 @@ def executor(self, testcase): else: exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] - result = runner.invoke(exec_cmd, []) + args = [] if 'args' not 
in input else input['args'] + result = runner.invoke(exec_cmd, args) print(result.exit_code) print(result.output) - assert result.exit_code == 0 + expected_code = 0 if 'rc' not in input else input['rc'] + assert result.exit_code == expected_code assert result.output == input['rc_output'] @classmethod diff --git a/tests/wm_input/wm_test_vectors.py b/tests/wm_input/wm_test_vectors.py index 93d9faa4cb..f0a80cf9cb 100644 --- a/tests/wm_input/wm_test_vectors.py +++ b/tests/wm_input/wm_test_vectors.py @@ -1,3 +1,373 @@ +show_pg_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n") + +show_pg_wm_shared_output_all_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 " + "PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + 
"Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_persistent_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ 
------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_shared_output_all_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7" + " PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- 
----- ----- ----- ----- ----- -----" + " ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207 500" + " 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 " + "PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207 " + "500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A " + "N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_queue_wm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- 
----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------- ------ ------ ------ ------ +Ethernet-BP256 2 0 5 2057328 208 20 228 2 +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 
MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------ ------ ------ ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: 
(Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_buffer_pool_wm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_wm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_pwm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_buffer_pool_pwm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_wm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" 
+ +show_hdrm_pool_wm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" + +show_hdrm_pool_pwm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_pwm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +clear_hdrm_pool_wm_output_one_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + +clear_hdrm_pool_wm_output_all_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic1 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + show_pg_wm_shared_output="""\ Ingress shared pool occupancy per PG: Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 @@ -124,56 +494,198 @@ 'rc_output': show_pg_wm_hdrm_output } ], - 'show_pg_pwm_shared' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'shared'], - 'rc_output': show_pg_persistent_wm_shared_output - } - ], - 'show_pg_pwm_hdrm' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'headroom'], - 'rc_output': show_pg_persistent_wm_hdrm_output - } - ], - 'show_q_wm_unicast' : [ {'cmd' : ['queue', 'watermark', 'unicast'], - 'rc_output': show_queue_wm_unicast_output + 'show_pg_pwm_shared': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output } ], - 
'show_q_pwm_unicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'unicast'], - 'rc_output': show_queue_pwm_unicast_output - } + 'show_pg_pwm_hdrm': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output + } + ], + 'show_q_wm_unicast': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output + } + ], + 'show_q_pwm_unicast': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output + } + ], + 'show_q_wm_multicast': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_q_wm_multicast' : [ {'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_multicast_neg' : [ { 'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_neg_output - } + 'show_q_wm_multicast_neg': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_neg_output + } ], - 'show_q_pwm_multicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_all' : [ {'cmd' : ['queue', 'watermark', 'all'], - 'rc_output': show_queue_wm_all_output - } - ], - 'show_q_pwm_all' : [ {'cmd' : ['queue', 'persistent-watermark', 'all'], - 'rc_output': show_queue_pwm_all_output - } - ], - 'show_buffer_pool_wm' : [ {'cmd' : ['buffer_pool', 'watermark'], - 'rc_output': show_buffer_pool_wm_output - } + 'show_q_pwm_multicast': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_buffer_pool_pwm' : [ {'cmd' : ['buffer_pool', 'persistent-watermark'], - 'rc_output': show_buffer_pool_persistent_wm_output - } - ], - 'show_hdrm_pool_wm' : [ {'cmd' : ['headroom-pool', 'watermark'], - 'rc_output': show_hdrm_pool_wm_output + 'show_q_wm_all': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': 
show_queue_wm_all_output + } + ], + 'show_q_pwm_all': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output + } + ], + 'show_buffer_pool_wm': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output } - ], - 'show_hdrm_pool_pwm' : [ {'cmd' : ['headroom-pool', 'persistent-watermark'], - 'rc_output': show_hdrm_pool_persistent_wm_output + ], + 'show_buffer_pool_pwm': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_persistent_wm_output } - ] + ], + 'show_hdrm_pool_wm': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output + } + ], + 'show_hdrm_pool_pwm': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_persistent_wm_output + } + ], + 'show_pg_wm_shared_one_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_pg_wm_shared_output_one_masic + } + ], + 'show_pg_wm_shared_all_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'rc_output': show_pg_wm_shared_output_all_masic + } + ], + 'show_pg_wm_hdrm_one_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_wm_hdrm_output_one_masic + } + ], + 'show_pg_wm_hdrm_all_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'rc_output': show_pg_wm_hdrm_output_all_masic + } + ], + 'show_pg_pwm_shared_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_shared_output_one_masic + } + ], + 'show_pg_pwm_shared_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output_all_masic + } + ], + 'show_pg_pwm_hdrm_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_hdrm_output_one_masic + 
} + ], + 'show_pg_pwm_hdrm_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output_all_masic + } + ], + 'show_q_wm_unicast_one_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_unicast_output_one_masic + } + ], + 'show_q_wm_unicast_all_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output_all_masic + } + ], + 'show_q_pwm_unicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_unicast_output_one_masic + } + ], + 'show_q_pwm_unicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output_all_masic + } + ], + 'show_q_wm_multicast_one_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_wm_multicast_output_one_masic + } + ], + 'show_q_wm_multicast_all_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output_all_masic + } + ], + 'show_q_pwm_multicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_pwm_multicast_output_one_masic + } + ], + 'show_q_pwm_multicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_pwm_multicast_output_all_masic + } + ], + 'show_q_wm_all_one_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_all_output_one_masic + } + ], + 'show_q_wm_all_all_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': show_queue_wm_all_output_all_masic + } + ], + 'show_q_pwm_all_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_all_output_one_masic + } + ], + 
'show_q_pwm_all_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output_all_masic + } + ], + 'show_buffer_pool_wm_one_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_wm_output_one_masic + } + ], + 'show_buffer_pool_wm_all_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output_all_masic + } + ], + 'show_buffer_pool_pwm_one_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_pwm_output_one_masic + } + ], + 'show_buffer_pool_pwm_all_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_pwm_output_all_masic + } + ], + 'show_hdrm_pool_wm_one_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_wm_output_one_masic + } + ], + 'show_hdrm_pool_wm_all_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output_all_masic + } + ], + 'show_hdrm_pool_pwm_one_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_pwm_output_one_masic + } + ], + 'show_hdrm_pool_pwm_all_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_pwm_output_all_masic + } + ], + 'show_invalid_namespace_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic14'], + 'rc': 2, + 'rc_output': '' + } + ], + 'clear_hdrm_pool_wm_one_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-n', 'asic0', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_one_masic + } + ], + 'clear_hdrm_pool_wm_all_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_all_masic + } + ] } From 867fc5400e7c53e068bf61e7c4c9e33b54a3fea3 Mon Sep 17 00:00:00 2001 From: Vivek Date: Wed, 18 Sep 2024 
18:12:42 -0500 Subject: [PATCH 13/13] [DASH] Add support for ENI counters (#3496) ### What I did Updated counterpoll cli to support configuration for ENI counters ``` root@dpu:/home/admin# counterpoll eni enable root@dpu:/home/admin# counterpoll eni interval 1000 root@dpu:/home/admin# counterpoll show Type Interval (in ms) Status -------------------------- ------------------ -------- QUEUE_STAT default (10000) enable PORT_STAT default (1000) enable PORT_BUFFER_DROP default (60000) enable RIF_STAT default (1000) enable QUEUE_WATERMARK_STAT default (60000) enable PG_WATERMARK_STAT default (60000) enable PG_DROP_STAT default (10000) enable BUFFER_POOL_WATERMARK_STAT default (60000) enable ACL 10000 enable ENI_STAT 1000 enable ``` --- counterpoll/main.py | 59 ++++++++++++++++++++++++++++++++ tests/counterpoll_test.py | 55 +++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 4 +++ 3 files changed, 118 insertions(+) diff --git a/counterpoll/main.py b/counterpoll/main.py index ad15c8c248..530281188f 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -3,17 +3,29 @@ from flow_counter_util.route import exit_if_route_flow_counter_not_support from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate +from sonic_py_common import device_info BUFFER_POOL_WATERMARK = "BUFFER_POOL_WATERMARK" PORT_BUFFER_DROP = "PORT_BUFFER_DROP" PG_DROP = "PG_DROP" ACL = "ACL" +ENI = "ENI" DISABLE = "disable" ENABLE = "enable" DEFLT_60_SEC= "default (60000)" DEFLT_10_SEC= "default (10000)" DEFLT_1_SEC = "default (1000)" + +def is_dpu(db): + """ Check if the device is DPU """ + platform_info = device_info.get_platform_info(db) + if platform_info.get('switch_type') == 'dpu': + return True + else: + return False + + @click.group() def cli(): """ SONiC Static Counter Poll configurations """ @@ -126,6 +138,7 @@ def disable(): port_info['FLEX_COUNTER_STATUS'] = DISABLE configdb.mod_entry("FLEX_COUNTER_TABLE", PORT_BUFFER_DROP, port_info) + # Ingress PG drop 
packet stat @cli.group() @click.pass_context @@ -382,6 +395,47 @@ def disable(ctx): fc_info['FLEX_COUNTER_STATUS'] = 'disable' ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +# ENI counter commands +@cli.group() +@click.pass_context +def eni(ctx): + """ ENI counter commands """ + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + if not is_dpu(ctx.obj): + click.echo("ENI counters are not supported on non DPU platforms") + exit(1) + + +@eni.command(name='interval') +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def eni_interval(ctx, poll_interval): + """ Set eni counter query interval """ + eni_info = {} + eni_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='enable') +@click.pass_context +def eni_enable(ctx): + """ Enable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='disable') +@click.pass_context +def eni_disable(ctx): + """ Disable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + @cli.command() def show(): """ Show the counter configuration """ @@ -399,6 +453,7 @@ def show(): tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE') + eni_info = configdb.get_entry('FLEX_COUNTER_TABLE', ENI) header = ("Type", "Interval (in ms)", "Status") data = [] @@ -428,6 +483,10 @@ def show(): data.append(["FLOW_CNT_ROUTE_STAT", route_info.get("POLL_INTERVAL", DEFLT_10_SEC), route_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if is_dpu(config_db) and eni_info: + data.append(["ENI_STAT", eni_info.get("POLL_INTERVAL", DEFLT_10_SEC), + eni_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + 
click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) def _update_config_db_flex_counter_table(status, filename): diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 4a4da07ee9..6c165498c5 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -2,6 +2,7 @@ import json import os import pytest +import mock import sys from click.testing import CliRunner from shutil import copyfile @@ -31,6 +32,21 @@ FLOW_CNT_ROUTE_STAT 10000 enable """ +expected_counterpoll_show_dpu = """Type Interval (in ms) Status +-------------------- ------------------ -------- +QUEUE_STAT 10000 enable +PORT_STAT 1000 enable +PORT_BUFFER_DROP 60000 enable +QUEUE_WATERMARK_STAT default (60000) enable +PG_WATERMARK_STAT default (60000) enable +PG_DROP_STAT 10000 enable +ACL 5000 enable +TUNNEL_STAT 3000 enable +FLOW_CNT_TRAP_STAT 10000 enable +FLOW_CNT_ROUTE_STAT 10000 enable +ENI_STAT 1000 enable +""" + class TestCounterpoll(object): @classmethod def setup_class(cls): @@ -44,6 +60,13 @@ def test_show(self): print(result.output) assert result.output == expected_counterpoll_show + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_show_dpu(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + result = runner.invoke(counterpoll.cli.commands["show"], []) + assert result.output == expected_counterpoll_show_dpu + def test_port_buffer_drop_interval(self): runner = CliRunner() result = runner.invoke(counterpoll.cli.commands["port-buffer-drop"].commands["interval"], ["30000"]) @@ -221,6 +244,38 @@ def test_update_route_counter_interval(self): assert result.exit_code == 2 assert expected in result.output + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_eni_status(self, status): + runner = CliRunner() + result = runner.invoke(counterpoll.cli, ["eni", status]) + assert result.exit_code == 1 + assert result.output == "ENI counters are 
not supported on non DPU platforms\n" + + @pytest.mark.parametrize("status", ["disable", "enable"]) + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_status_dpu(self, mock_get_platform_info, status): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["eni"].commands[status], [], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["ENI"]["FLEX_COUNTER_STATUS"] + + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_interval(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + test_interval = "2000" + + result = runner.invoke(counterpoll.cli.commands["eni"].commands["interval"], [test_interval], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["ENI"]["POLL_INTERVAL"] @classmethod def teardown_class(cls): diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 108fa7593d..187efed553 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1785,6 +1785,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|ENI": { + "POLL_INTERVAL": "1000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600",