Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 21 additions & 11 deletions tests/common/devices/sonic.py
Original file line number Diff line number Diff line change
Expand Up @@ -2405,7 +2405,7 @@ def is_backend_portchannel(self, port_channel, mg_facts):
def is_backend_port(self, port, mg_facts):
return True if "Ethernet-BP" in port else False

def active_ip_interfaces(self, ip_ifs, tbinfo, ns_arg=DEFAULT_NAMESPACE, intf_num="all"):
def active_ip_interfaces(self, ip_ifs, tbinfo, ns_arg=DEFAULT_NAMESPACE, intf_num="all", ip_type="ipv4"):
"""
Return a dict of active IP (Ethernet or PortChannel) interfaces, with
interface and peer IPv4 address.
Expand All @@ -2421,16 +2421,26 @@ def active_ip_interfaces(self, ip_ifs, tbinfo, ns_arg=DEFAULT_NAMESPACE, intf_nu
if ((k.startswith("Ethernet") and config_facts_ports.get(k, {}).get("role", "") != "Dpc" and
(not k.startswith("Ethernet-BP")) and not is_inband_port(k)) or
(k.startswith("PortChannel") and not self.is_backend_portchannel(k, mg_facts))):
# Ping for some time to get ARP Re-learnt.
# We might have to tune it further if needed.
if (v["admin"] == "up" and v["oper_state"] == "up" and
self.ping_v4(v["peer_ipv4"], count=3, ns_arg=ns_arg)):
ip_ifaces[k] = {
"ipv4": v["ipv4"],
"peer_ipv4": v["peer_ipv4"],
"bgp_neighbor": v["bgp_neighbor"]
}
active_ip_intf_cnt += 1
if ip_type == "ipv4":
# Ping for some time to get ARP Re-learnt.
# We might have to tune it further if needed.
if (v["admin"] == "up" and v["oper_state"] == "up" and
self.ping_v4(v["peer_ipv4"], count=3, ns_arg=ns_arg)):
ip_ifaces[k] = {
"ipv4": v["ipv4"],
"peer_ipv4": v["peer_ipv4"],
"bgp_neighbor": v["bgp_neighbor"]
}
active_ip_intf_cnt += 1
elif ip_type == "ipv6":
if (v["admin"] == "up" and v["oper_state"] == "up" and
self.ping_v6(v["peer_ipv6"], count=3, ns_arg=ns_arg)):
ip_ifaces[k] = {
"ipv6": v["ipv6"],
"peer_ipv6": v["peer_ipv6"],
"bgp_neighbor": v["bgp_neighbor"]
}
active_ip_intf_cnt += 1

if isinstance(intf_num, int) and intf_num > 0 and active_ip_intf_cnt == intf_num:
break
Expand Down
12 changes: 9 additions & 3 deletions tests/common/devices/sonic_asic.py
Original file line number Diff line number Diff line change
Expand Up @@ -321,17 +321,23 @@ def is_backend_portchannel(self, port_channel):
return False
return True

def get_active_ip_interfaces(self, tbinfo, intf_num="all"):
def get_active_ip_interfaces(self, tbinfo, intf_num="all", ip_type="ipv4"):
"""
Return a dict of active IP (Ethernet or PortChannel) interfaces, with
interface and peer IPv4 address.

Returns:
Dict of Interfaces and their IPv4 address
"""
ip_ifs = self.show_ip_interface()["ansible_facts"]["ip_interfaces"]
if ip_type == "ipv4":
ip_ifs = self.show_ip_interface()["ansible_facts"]["ip_interfaces"]
elif ip_type == "ipv6":
ip_ifs = self.show_ipv6_interface()["ansible_facts"]["ipv6_interfaces"]
else:
raise ValueError("Invalid IP type: {}".format(ip_type))

return self.sonichost.active_ip_interfaces(
ip_ifs, tbinfo, self.namespace, intf_num=intf_num
ip_ifs, tbinfo, self.namespace, intf_num=intf_num, ip_type=ip_type
)

def bgp_drop_rule(self, ip_version, state="present"):
Expand Down
113 changes: 89 additions & 24 deletions tests/qos/qos_sai_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@
from tests.common.snappi_tests.qos_fixtures import get_pfcwd_config, reapply_pfcwd
from tests.common.snappi_tests.common_helpers import \
stop_pfcwd, disable_packet_aging, enable_packet_aging
from tests.common.utilities import is_ipv6_only_topology


logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -150,8 +152,11 @@ def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False, pdb=False
Raises:
RunAnsibleModuleFail if ptf test fails
"""
custom_options = " --disable-ipv6 --disable-vxlan --disable-geneve" \
ip_type = testParams.get("ip_type", "ipv4")
custom_options = " --disable-vxlan --disable-geneve" \
" --disable-erspan --disable-mpls --disable-nvgre"
if ip_type != "ipv6":
custom_options += " --disable-ipv6"
# Append a suffix to the logfile name if log_suffix is present in testParams
log_suffix = testParams.get("log_suffix", "")
logfile_suffix = "_{0}".format(log_suffix) if log_suffix else ""
Expand Down Expand Up @@ -959,7 +964,7 @@ def configure_ip_on_ptf_intfs(self, ptfhost, get_src_dst_asic_and_duts, tbinfo):
def dutConfig(
self, request, duthosts, configure_ip_on_ptf_intfs, get_src_dst_asic_and_duts,
lower_tor_host, tbinfo, dualtor_ports_for_duts, dut_qos_maps, # noqa: F811
is_supported_per_dir, lossy_queue_traffic_direction
is_supported_per_dir, lossy_queue_traffic_direction, ip_type
):
"""
Build DUT host config pertaining to QoS SAI tests
Expand Down Expand Up @@ -1006,6 +1011,9 @@ def dutConfig(
for key, value in src_mgFacts['minigraph_ports'].items()
if not key.startswith("Ethernet-BP")
}
bgp_peer_ip_key = "peer_ipv6" if ip_type == "ipv6" else "peer_ipv4"
ip_version = 6 if ip_type == "ipv6" else 4
vlan_info = {}

# LAG ports in T1 TOPO need to be removed in Mellanox devices
if topo in self.SUPPORTED_T0_TOPOS or (topo in self.SUPPORTED_PTF_TOPOS and isMellanoxDevice(src_dut)):
Expand All @@ -1021,6 +1029,7 @@ def dutConfig(
dutLagInterfaces.append(src_mgFacts["minigraph_ptf_indices"][intf])

config_facts = duthosts.config_facts(host=src_dut.hostname, source="running")
vlan_info = config_facts[src_dut.hostname]['VLAN']
port_speeds = self.__buildPortSpeeds(config_facts[src_dut.hostname])
low_speed_portIds = []
if src_dut.facts['hwsku'] in self.BREAKOUT_SKUS and 'backend' not in topo:
Expand Down Expand Up @@ -1054,7 +1063,7 @@ def dutConfig(
for portConfig in intf_map:
intf = portConfig["attachto"].split(".")[0]
portIndex = src_mgFacts["minigraph_ptf_indices"][intf]
if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4:
if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == ip_version:
if portIndex in testPortIds[src_dut_index][src_asic_index]:
portIpMap = {'peer_addr': portConfig["peer_addr"]}
if 'vlan' in portConfig:
Expand Down Expand Up @@ -1114,14 +1123,14 @@ def dutConfig(
testPortIds[src_dut_index] = {}
for dut_asic in get_src_dst_asic_and_duts['all_asics']:
dutPortIps[src_dut_index][dut_asic.asic_index] = {}
for iface, addr in dut_asic.get_active_ip_interfaces(tbinfo).items():
for iface, addr in dut_asic.get_active_ip_interfaces(tbinfo, ip_type=ip_type).items():
vlan_id = None
if iface.startswith("Ethernet"):
portName = iface
if "." in iface:
portName, vlan_id = iface.split(".")
portIndex = src_mgFacts["minigraph_ptf_indices"][portName]
portIpMap = {'peer_addr': addr["peer_ipv4"]}
portIpMap = {'peer_addr': addr[bgp_peer_ip_key]}
if vlan_id is not None:
portIpMap['vlan_id'] = vlan_id
dutPortIps[src_dut_index][dut_asic.asic_index].update({portIndex: portIpMap})
Expand All @@ -1130,7 +1139,7 @@ def dutConfig(
iter(src_mgFacts["minigraph_portchannels"][iface]["members"])
)
portIndex = src_mgFacts["minigraph_ptf_indices"][portName]
portIpMap = {'peer_addr': addr["peer_ipv4"]}
portIpMap = {'peer_addr': addr[bgp_peer_ip_key]}
dutPortIps[src_dut_index][dut_asic.asic_index].update({portIndex: portIpMap})
# If the leaf router is using separated DSCP_TO_TC_MAP on uplink/downlink ports.
# we also need to test them separately
Expand All @@ -1142,11 +1151,11 @@ def dutConfig(
neighName = src_mgFacts["minigraph_neighbors"].get(portName, {}).get("name", "").lower()
if 't0' in neighName:
downlinkPortIds.append(portIndex)
downlinkPortIps.append(addr["peer_ipv4"])
downlinkPortIps.append(addr[bgp_peer_ip_key])
downlinkPortNames.append(portName)
elif 't2' in neighName:
uplinkPortIds.append(portIndex)
uplinkPortIps.append(addr["peer_ipv4"])
uplinkPortIps.append(addr[bgp_peer_ip_key])
uplinkPortNames.append(portName)

testPortIds[src_dut_index][dut_asic.asic_index] = sorted(
Expand Down Expand Up @@ -1387,7 +1396,9 @@ def dutConfig(
"srcDutInstance": src_dut,
"dstDutInstance": dst_dut,
"dualTor": request.config.getoption("--qos_dual_tor"),
"dualTorScenario": len(dualtor_ports_for_duts) != 0 and "dualtor" not in tbinfo["topo"]["name"]
"dualTorScenario": len(dualtor_ports_for_duts) != 0 and "dualtor" not in tbinfo["topo"]["name"],
"ip_type": ip_type,
"vlan_info": vlan_info
}

@pytest.fixture(scope='class')
Expand Down Expand Up @@ -1936,6 +1947,42 @@ def handleFdbAging(self, duthosts, get_src_dst_asic_and_duts):
self.__loadSwssConfig(duthost)
self.__deleteTmpSwitchConfig(duthost)

@pytest.fixture(scope='class', autouse=True)
def update_delay_first_probe_time_for_v6_top(self, get_src_dst_asic_and_duts, tbinfo, dutConfig):
    """
    Raise the IPv6 neighbor ``delay_first_probe_time`` on t0 topologies for IPv6 runs.

    When neighbor entries are learnt by sending NS packets, the kernel default
    delay_first_probe_time of 5 seconds is too short: the neighbor state moves
    from DELAY to PROBE and then to FAILED before the QoS SAI cases finish.
    Set a very large value for the duration of the test class so the learnt
    IPv6 neighbors stay usable, and restore the original value afterwards.
    """
    ip_type = dutConfig.get('ip_type', 'ipv4')
    # Only relevant for IPv6 runs on t0 topologies; otherwise do nothing.
    if ip_type != 'ipv6' or 't0' not in tbinfo["topo"]["type"]:
        yield
        return

    # The sysctl is per-interface; use the first (typically only) VLAN interface.
    vlan_name = list(dutConfig['vlan_info'].keys())[0]
    dut_asic = get_src_dst_asic_and_duts['src_asic']
    file_path_v6_delay_first_probe_time = f"/proc/sys/net/ipv6/neigh/{vlan_name}/delay_first_probe_time"
    cmd_get_v6_delay_first_probe_time = f"cat {file_path_v6_delay_first_probe_time}"

    # 100000 seconds far exceeds any QoS SAI case execution time, so the tested
    # IPv6 neighbor entries never transition from DELAY to PROBE, then to FAILED,
    # in the middle of a run.
    new_v6_delay_first_probe_time = 100000

    # Remember the current kernel setting so it can be restored on teardown.
    original_v6_delay_first_probe_time = dut_asic.shell(cmd_get_v6_delay_first_probe_time)['stdout']

    cmd_update_v6_delay_first_probe_time = \
        f"echo {new_v6_delay_first_probe_time} | sudo tee {file_path_v6_delay_first_probe_time}"
    dut_asic.shell(cmd_update_v6_delay_first_probe_time)

    yield

    # Restore the original kernel setting after the test class completes.
    cmd_restore_v6_delay_first_probe_time = \
        f"echo {original_v6_delay_first_probe_time} | sudo tee {file_path_v6_delay_first_probe_time}"
    dut_asic.shell(cmd_restore_v6_delay_first_probe_time)

@pytest.fixture(scope='function', autouse=True)
def populateArpEntries_T2(
self, duthosts, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig):
Expand Down Expand Up @@ -1983,7 +2030,8 @@ def populateArpEntries_T2(
@pytest.fixture(scope='class', autouse=True)
def populateArpEntries(
self, duthosts, get_src_dst_asic_and_duts, lossy_queue_traffic_direction,
ptfhost, dutTestParams, dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host # noqa: F811
ptfhost, dutTestParams, dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host, # noqa: F811
ip_type, update_delay_first_probe_time_for_v6_top # noqa: F811
):
"""
Update ARP entries of QoS SAI test ports
Expand Down Expand Up @@ -2012,13 +2060,19 @@ def populateArpEntries(

self.populate_arp_entries(
get_src_dst_asic_and_duts, ptfhost, dutTestParams,
dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host)
dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host, ip_type)

yield
return

@pytest.fixture(scope='module', autouse=True)
def dut_disable_ipv6(self, duthosts, tbinfo, lower_tor_host, swapSyncd_on_selected_duts): # noqa: F811
def dut_disable_ipv6(self, duthosts, tbinfo, lower_tor_host, swapSyncd_on_selected_duts, ip_type): # noqa: F811

if ip_type == "ipv6":
logger.info("skip dut_disable_ipv6 fixture for ipv6")
yield
return

if 'dualtor' in tbinfo['topo']['name']:
dut_list = [lower_tor_host]
else:
Expand Down Expand Up @@ -2677,7 +2731,8 @@ def skip_pacific_dst_asic(self, dutConfig):

def populate_arp_entries(
self, get_src_dst_asic_and_duts,
ptfhost, dutTestParams, dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host # noqa: F811
ptfhost, dutTestParams, dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host, # noqa: F811
ip_type='ipv4'
):
"""
Update ARP entries of QoS SAI test ports
Expand All @@ -2688,24 +2743,29 @@ def populate_arp_entries(
dut_asic.command('sonic-clear arp')

saiQosTest = None
if dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulate"
elif dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulatePTF"
if ip_type == "ipv6":
saiQosTest = "sai_qos_tests.ARPpopulateIPv6"
else:
for dut_asic in get_src_dst_asic_and_duts['all_asics']:
result = dut_asic.command("arp -n")
pytest_assert(result["rc"] == 0, "failed to run arp command on {0}".format(dut_asic.sonichost.hostname))
if result["stdout"].find("incomplete") == -1:
saiQosTest = "sai_qos_tests.ARPpopulate"
if dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulate"
elif dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulatePTF"
else:
for dut_asic in get_src_dst_asic_and_duts['all_asics']:
result = dut_asic.command("arp -n")
pytest_assert(result["rc"] == 0, "failed to run arp command on {0}".format(
dut_asic.sonichost.hostname))
if result["stdout"].find("incomplete") == -1:
saiQosTest = "sai_qos_tests.ARPpopulate"

if saiQosTest:
testParams = dutTestParams["basicParams"]
testParams.update(dutConfig["testPorts"])
testParams.update({
"testPortIds": dutConfig["testPortIds"],
"testPortIps": dutConfig["testPortIps"],
"testbed_type": dutTestParams["topo"]
"testbed_type": dutTestParams["topo"],
"ip_type": ip_type
})
self.runPtfTest(
ptfhost, testCase=saiQosTest, testParams=testParams
Expand Down Expand Up @@ -3013,6 +3073,11 @@ def lossy_queue_traffic_direction(self, is_supported_per_dir):
logging.info(f"Device not support per dir: {lossy_queue_dir_test}")
yield lossy_queue_dir_test

@pytest.fixture(scope='module', autouse=True)
def ip_type(self, tbinfo):
    """Yield the address family under test: 'ipv6' on IPv6-only topologies, 'ipv4' otherwise."""
    yield "ipv6" if is_ipv6_only_topology(tbinfo) else "ipv4"

def get_src_and_dst_ports_when_support_per_dir(self, uplinkPortIds, downlinkPortIds, lossy_queue_traffic_direction):
if 'src_uplink_dst_downlink' == lossy_queue_traffic_direction:
src_port_ids = uplinkPortIds
Expand Down Expand Up @@ -3069,7 +3134,7 @@ def get_queue_weights_based_dynamic_th(self, duthost, queue_table_postfix_list):
queue_dynamic_th_map = {}
weights_list = []
for queue in queue_table_postfix_list:
key_str = f"BUFFER_PROFILE_TABLE:queue{queue}_downlink_lossy_profile"
key_str = f"BUFFER_PROFILE_TABLE:queue{queue}_downlink_lossy_profile" # noqa: E231
dynamic_th_res = duthost.run_redis_cmd(argv=["redis-cli", "-n", 0, "HGET", key_str, "dynamic_th"])
if dynamic_th_res:
queue_dynamic_th_map[queue] = dynamic_th_res[0]
Expand Down
12 changes: 8 additions & 4 deletions tests/qos/test_qos_sai.py
Original file line number Diff line number Diff line change
Expand Up @@ -1308,7 +1308,8 @@ def testQosSaiLossyQueue(
"src_port_vlan": dutConfig["testPorts"]["src_port_vlan"],
"pkts_num_leak_out": dutQosConfig["param"][portSpeedCableLength]["pkts_num_leak_out"],
"pkts_num_trig_egr_drp": qosConfig["lossy_queue_1"]["pkts_num_trig_egr_drp"],
"hwsku": dutTestParams['hwsku']
"hwsku": dutTestParams['hwsku'],
"ip_type": dutConfig["ip_type"]
})

if "platform_asic" in dutTestParams["basicParams"]:
Expand Down Expand Up @@ -1476,7 +1477,8 @@ def testQosSaiDscpQueueMapping(
"hwsku": dutTestParams['hwsku'],
"dual_tor": dutConfig['dualTor'],
"dual_tor_scenario": dutConfig['dualTorScenario'],
"tc_to_dscp_count_map": tc_to_dscp_count
"tc_to_dscp_count_map": tc_to_dscp_count,
'ip_type': dutConfig["ip_type"]
})

if "platform_asic" in dutTestParams["basicParams"]:
Expand Down Expand Up @@ -1783,7 +1785,8 @@ def testQosSaiPgSharedWatermark(
"pkts_num_fill_min": qosConfig[pgProfile]["pkts_num_fill_min"],
"pkts_num_fill_shared": pktsNumFillShared,
"cell_size": qosConfig[pgProfile]["cell_size"],
"hwsku": dutTestParams['hwsku']
"hwsku": dutTestParams['hwsku'],
"ip_type": dutConfig["ip_type"]
})

if "platform_asic" in dutTestParams["basicParams"]:
Expand Down Expand Up @@ -1992,7 +1995,8 @@ def testQosSaiQSharedWatermark(
"pkts_num_trig_drp": triggerDrop,
"cell_size": qosConfig[queueProfile]["cell_size"],
"hwsku": dutTestParams['hwsku'],
"dut_asic": dutConfig["dutAsic"]
"dut_asic": dutConfig["dutAsic"],
"ip_type": dutConfig["ip_type"]
})

if "platform_asic" in dutTestParams["basicParams"]:
Expand Down
Loading