From dd76c1d863c7fa6f80c5efff2f4f0a005485f4fe Mon Sep 17 00:00:00 2001 From: Sreejith Sreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Wed, 13 Mar 2024 03:14:30 -0700 Subject: [PATCH 1/6] Added testcase to run traffic in full mesh from all src to all dst port pair. After all src inject traffic to a given dst, queue counters are verified for any drop --- .../tests_mark_conditions.yaml | 2 +- tests/qos/qos_sai_base.py | 8 +- tests/qos/test_qos_sai.py | 140 +++++++++++++ tests/saitests/py3/sai_qos_tests.py | 189 +++++++++++++++++- 4 files changed, 334 insertions(+), 5 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 11f1d880238..212a20c7297 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1048,7 +1048,7 @@ qos/test_qos_sai.py::TestQosSai: skip: reason: "Unsupported testbed type." 
conditions: - - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic', 'ptf64'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testIPIPQosSaiDscpToPgMapping: skip: diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 95525307aa2..cc0b3aa1872 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -123,13 +123,17 @@ def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False): """ custom_options = " --disable-vxlan --disable-geneve" \ " --disable-erspan --disable-mpls --disable-nvgre" + # Append a suffix to the logfile name if log_suffix is present in testParams + log_suffix = testParams.get("log_suffix", "") + logfile_suffix = "_{0}".format(log_suffix) if log_suffix else "" + ptf_runner( ptfhost, "saitests", testCase, platform_dir="ptftests", params=testParams, - log_file="/tmp/{0}.log".format(testCase), + log_file="/tmp/{0}{1}.log".format(testCase, logfile_suffix), # Include suffix in the logfile name, qlen=10000, is_python3=True, relax=relax, @@ -890,7 +894,7 @@ def dutConfig( if len(dutPortIps[src_dut_index][src_asic_index]) != 0: testPortIps.update(dutPortIps) - elif topo in self.SUPPORTED_T1_TOPOS: + elif topo in self.SUPPORTED_T1_TOPOS or (topo in self.SUPPORTED_PTF_TOPOS and is_cisco_device(src_dut)): # T1 is supported only for 'single_asic' or 'single_dut_multi_asic'. 
# So use src_dut as the dut use_separated_upkink_dscp_tc_map = separated_dscp_to_tc_map_on_uplink(dut_qos_maps) diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index fd96df188fe..e282d5e9b36 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -2254,3 +2254,143 @@ def testQosSaiLossyQueueVoqMultiSrc( ptfhost, testCase="sai_qos_tests.LossyQueueVoqMultiSrcTest", testParams=testParams ) + + def testQosSaiFullMeshTrafficSanity( + self, ptfhost, dutTestParams, dutConfig, dutQosConfig, + get_src_dst_asic_and_duts, dut_qos_maps, duthosts, # noqa F811 + enum_frontend_dut_hostname, enum_frontend_asic_index + ): + """ + Test QoS SAI traffic sanity + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + Returns: + None + Raises: + RunAnsibleModuleFail if ptf test fails + """ + # Concurrent execution with a specific set of dst port + def run_test_for_dst_port(start, end): + test_params = dict() + test_params.update(dutTestParams["basicParams"]) + test_params.update({ + "testbed_type": dutTestParams["topo"], + "all_src_port_id_to_ip": all_src_port_id_to_ip, + "all_src_port_id_to_name": all_src_port_id_to_name, + "all_dst_port_id_to_ip": {port_id: all_dst_port_id_to_ip[port_id] for port_id in range(start, end)}, + "all_dst_port_id_to_name": {port_id: all_dst_port_id_to_name[port_id] for port_id in range(start, end)}, + "dscp_to_q_map": dscp_to_q_map, + # Add a log_suffix to have separate log and pcap file name + "log_suffix": "_".join([str(port_id) for port_id in range(start, end)]), + "hwsku": dutTestParams['hwsku'] + }) + + self.runPtfTest(ptfhost, testCase="sai_qos_tests.FullMeshTrafficSanity", testParams=test_params) + + def generate_ip_address(base_ip, new_first_octet): + octets = 
base_ip.split('.') + if len(octets) != 4: + raise ValueError("Invalid IP address format") + octets[0] = str(new_first_octet) + octets[2] = octets[3] + octets[3] = '1' + return '.'.join(octets) + + def combine_ips(src_ips, dst_ips, new_first_octet): + combined_ips_map = {} + + for key, src_info in src_ips.items(): + src_ip = src_info['peer_addr'] + new_ip = generate_ip_address(src_ip, new_first_octet) + combined_ips_map[key] = {'original_ip': src_ip, 'generated_ip': new_ip} + + for key, dst_info in dst_ips.items(): + dst_ip = dst_info['peer_addr'] + new_ip = generate_ip_address(dst_ip, new_first_octet) + combined_ips_map[key] = {'original_ip': dst_ip, 'generated_ip': new_ip} + + return combined_ips_map + + def set_static_route(combined_ips_map, add_route=True): + action = "add" if add_route else "del" + for port, entry in combined_ips_map.items(): + if enum_frontend_asic_index is None: + duthost.shell("config route {} prefix {}.0/24 nexthop {}".format( + action, '.'.join(entry['generated_ip'].split('.')[:3]), entry['original_ip'])) + else: + duthost.shell("ip netns exec asic{} config route {} prefix {}.0/24 nexthop {}".format( + enum_frontend_asic_index, + action, '.'.join(entry['generated_ip'].split('.')[:3]), + entry['original_ip']) + ) + + if dutTestParams["basicParams"]["sonic_asic_type"] != "cisco-8000": + pytest.skip("Traffic sanity test is not supported") + + duthost = duthosts[enum_frontend_dut_hostname] + + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + + src_testPortIps = dutConfig["testPortIps"][src_dut_index][src_asic_index] + dst_testPortIps = dutConfig["testPortIps"][dst_dut_index][dst_asic_index] + + new_first_octet = 100 + combined_ips_map = combine_ips(src_testPortIps, dst_testPortIps, new_first_octet) + + # Fetch all port IDs and IPs + 
all_src_port_id_to_ip = {port_id: src_testPortIps[port_id]['peer_addr'] for port_id in src_testPortIps.keys()} + + all_src_port_id_to_name = { + port_id: dutConfig["dutInterfaces"][port_id] + for port_id in all_src_port_id_to_ip.keys() + } + + all_dst_port_id_to_ip = { + port_id: combined_ips_map[port_id]['generated_ip'] + for port_id in dst_testPortIps.keys() + } + + all_dst_port_id_to_name = { + port_id: dutConfig["dutInterfaces"][port_id] + for port_id in all_dst_port_id_to_ip.keys() + } + + try: + tc_to_q_map = dut_qos_maps['tc_to_queue_map']['AZURE'] + tc_to_dscp_map = {v: k for k, v in dut_qos_maps['dscp_to_tc_map']['AZURE'].items()} + except KeyError: + pytest.skip( + "Need both TC_TO_PRIORITY_GROUP_MAP and DSCP_TO_TC_MAP" + "and key AZURE to run this test.") + + if (dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS or + dutTestParams["topo"] in self.SUPPORTED_T1_TOPOS or + dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS): + dscp_to_q_map = {tc_to_dscp_map[tc]: tc_to_q_map[tc] for tc in tc_to_dscp_map if tc != 7} + else: + pytest.skip("Test not supported in T2 topology") + + set_static_route(combined_ips_map, True) + + # Define the number of splits + # for the dst port list + num_splits = 4 + + # Get all keys and sort them + all_keys = sorted(all_dst_port_id_to_ip.keys()) + + # Calculate the split points + split_points = [all_keys[i * len(all_keys) // num_splits] for i in range(1, num_splits)] + + # Execute with one set of dst port at a time, avoids ptf run getting timed out + for start, end in zip([0] + split_points, split_points + [len(all_keys)]): + run_test_for_dst_port(start, end) + + set_static_route(combined_ips_map, False) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 1a40a49eb09..4233c93de66 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -12,6 +12,7 @@ import texttable import math import os +import concurrent.futures from ptf.testutils import (ptf_ports, 
dp_poll, simple_arp_packet, @@ -177,7 +178,8 @@ def construct_tcp_pkt(pkt_len, dst_mac, src_mac, src_ip, dst_ip, dscp, src_vlan, return pkt -def get_multiple_flows(dp, dst_mac, dst_id, dst_ip, src_vlan, dscp, ecn, ttl, pkt_len, src_details, packets_per_port=1): +def get_multiple_flows(dp, dst_mac, dst_id, dst_ip, src_vlan, dscp, ecn, ttl, + pkt_len, src_details, packets_per_port=1, check_actual_dst_id=True): ''' Returns a dict of format: src_id : [list of (pkt, exp_pkt) pairs that go to the given dst_id] @@ -232,7 +234,10 @@ def get_rx_port_pkt(dp, src_port_id, pkt, exp_pkt): all_pkts[src_tuple[0]] except KeyError: all_pkts[src_tuple[0]] = [] - actual_dst_id = get_rx_port_pkt(dp, src_tuple[0], pkt, masked_exp_pkt) + if check_actual_dst_id is False: + actual_dst_id = dst_id + else: + actual_dst_id = get_rx_port_pkt(dp, src_tuple[0], pkt, masked_exp_pkt) if actual_dst_id == dst_id: all_pkts[src_tuple[0]].append((pkt, masked_exp_pkt, dst_id)) num_of_pkts += 1 @@ -5668,3 +5673,183 @@ def runTest(self): print("Successfully dropped {} packets".format(drops), file=sys.stderr) finally: self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) + + +class FullMeshTrafficSanity(sai_base_test.ThriftInterfaceDataPlane): + def setUp(self): + sai_base_test.ThriftInterfaceDataPlane.setUp(self) + time.sleep(5) + switch_init(self.clients) + + # Parse input parameters + self.testbed_type = self.test_params['testbed_type'] + self.router_mac = self.test_params['router_mac'] + self.sonic_version = self.test_params['sonic_version'] + + dscp_to_q_map = self.test_params['dscp_to_q_map'] + self.dscps = [int(key) for key in dscp_to_q_map.keys()] + self.queues = [int(value) for value in dscp_to_q_map.values()] + self.all_src_port_id_to_ip = self.test_params['all_src_port_id_to_ip'] + self.all_src_port_id_to_name = self.test_params['all_src_port_id_to_name'] + self.all_dst_port_id_to_ip = self.test_params['all_dst_port_id_to_ip'] + self.all_dst_port_id_to_name = 
self.test_params['all_dst_port_id_to_name'] + + self.all_port_id_to_ip = dict() + self.all_port_id_to_ip.update(self.all_src_port_id_to_ip) + self.all_port_id_to_ip.update(self.all_dst_port_id_to_ip) + + self.all_port_id_to_name = dict() + self.all_port_id_to_name.update(self.all_src_port_id_to_name) + self.all_port_id_to_name.update(self.all_dst_port_id_to_name) + + self.src_port_ids = list(self.all_src_port_id_to_ip.keys()) + self.dst_port_ids = list(self.all_dst_port_id_to_ip.keys()) + self.all_port_ids = self.src_port_ids + list(set(self.dst_port_ids) - set(self.src_port_ids)) + + self.asic_type = self.test_params['sonic_asic_type'] + self.packet_size = 100 + logging.info("Using packet size", self.packet_size) + self.flows_per_port = 6 + + self.all_port_id_to_mac = {port_id: self.dataplane.get_mac(0, port_id) + for port_id in self.all_port_id_to_ip.keys()} + + def tearDown(self): + sai_base_test.ThriftInterfaceDataPlane.tearDown(self) + + def config_traffic(self, dst_port_id, dscp, ecn_bit): + if type(ecn_bit) == bool: + ecn_bit = 1 if ecn_bit else 0 + self.dscp = dscp + self.dst_port_id = dst_port_id + self.tos = (dscp << 2) | ecn_bit + self.ttl = 64 + logging.debug("Getting multiple flows to {:>2}, dscp={}, dst_ip={}".format( + self.dst_port_id, self.dscp, self.dst_port_ip) + ) + self.pkt = get_multiple_flows( + self, + self.dst_port_mac, + dst_port_id, + self.dst_port_ip, + None, + dscp, + ecn_bit, + 64, + self.packet_size, + [(src_port_id, src_port_ip) for src_port_id, src_port_ip in self.all_src_port_id_to_ip.items()], + self.flows_per_port, + False) + logging.debug("Got multiple flows to {:>2}, dscp={}, dst_ip={}".format( + self.dst_port_id, self.dscp, self.dst_port_ip) + ) + + def runTest(self): + try: + failed_pairs = set() + logging.info("Total traffic src_dst_pairs being tested {}".format( + len(self.src_port_ids)*len(self.dst_port_ids)) + ) + pkt_count = 10 + + # Split the src port list for concurrent pkt injection + num_splits = 2 + split_points = 
[i * len(self.src_port_ids) // num_splits for i in range(1, num_splits)] + parts = [self.src_port_ids[i:j] for i, j in zip([0] + split_points, split_points + [None])] + + def runTestPerSrcList(src_port_list, checkCounter=False): + for src_port_id in src_port_list: + logging.debug( + "Sending {} packets X {} flows with dscp/queue {}/{} from src {} -> dst {}".format( + pkt_count, + len(self.pkt[src_port_id]), dscp, queue, + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name) + ) + if checkCounter: + port_cnt_base, q_cntrs_base = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, + port_list['dst'][real_dst_port_id] + ) + + for pkt_tuple in self.pkt[src_port_id]: + logging.debug( + "Sending {} packets with dscp/queue {}/{} from src {} -> dst {} Pkt {}".format( + pkt_count, dscp, queue, + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name, pkt_tuple[0]) + ) + send_packet(self, src_port_id, pkt_tuple[0], pkt_count) + + if checkCounter: + time.sleep(1) + port_cntrs, q_cntrs = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, + port_list['dst'][real_dst_port_id] + ) + pkts_enqueued = q_cntrs[queue] - q_cntrs_base[queue] + if pkts_enqueued < self.flows_per_port*pkt_count: + logging.info("Faulty src/dst {}/{} pair on queue {}".format( + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name, queue + )) + logging.info("q_cntrs_base {}".format(q_cntrs_base)) + logging.info("q_cntrs {}".format(q_cntrs)) + logging.info("port_cnt_base {}".format(port_cnt_base)) + logging.info("port_cntrs {}".format(port_cntrs)) + failed_pairs.add( + ( + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name, queue + ) + ) + + def findFaultySrcDstPair(dscp, queue): + ecn_bit = 1 if queue in [3, 4] else 0 + self.config_traffic(real_dst_port_id, dscp, ecn_bit) + runTestPerSrcList(self.src_port_ids, True) + + for dst_port_id in self.dst_port_ids: + real_dst_port_id = dst_port_id + dst_port_name 
= self.all_port_id_to_name.get(real_dst_port_id, 'Not Found') + logging.info("Starting Test for dst {}".format(dst_port_name)) + dst_port_mac = self.all_port_id_to_mac[real_dst_port_id] + self.dst_port_mac = self.router_mac if self.router_mac != '' else dst_port_mac + self.dst_port_ip = self.all_port_id_to_ip[real_dst_port_id] + + for i, dscp in enumerate(self.dscps): + queue = self.queues[i] # Need queue for occupancy verification + ecn_bit = 1 if queue in [3, 4] else 0 + self.config_traffic(real_dst_port_id, dscp, ecn_bit) + + port_cnt_base, q_cntrs_base = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, + port_list['dst'][real_dst_port_id] + ) + + with concurrent.futures.ThreadPoolExecutor(max_workers=num_splits) as executor: + # Submit the tasks to the executor + futures = [executor.submit(runTestPerSrcList, part) for part in parts] + + # Wait for all tasks to complete + concurrent.futures.wait(futures) + + time.sleep(1) + port_cntrs, q_cntrs = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, + port_list['dst'][real_dst_port_id] + ) + pkts_enqueued = q_cntrs[queue] - q_cntrs_base[queue] + logging.info("Enqueued on queue {} pkts {}".format(queue, pkts_enqueued)) + if pkts_enqueued < self.flows_per_port*pkt_count*len(self.src_port_ids): + logging.info("q_cntrs_base {}".format(q_cntrs_base)) + logging.info("q_cntrs {}".format(q_cntrs)) + logging.info("port_cnt_base {}".format(port_cnt_base)) + logging.info("port_cntrs {}".format(port_cntrs)) + # Craft pkt for given queue and + # inject from each src to find which src/dst pair is dropping pkt + findFaultySrcDstPair(dscp, queue) + + assert len(failed_pairs) == 0, "Traffic failed between {}".format(failed_pairs) + finally: + pass From 155b2e0253f13d46ff3ec28ac4a959127b8c9183 Mon Sep 17 00:00:00 2001 From: Sreejith Sreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Wed, 13 Mar 2024 15:24:13 -0700 Subject: [PATCH 2/6] Removed the unwanted Try block Fixed a bug in 
longest prefix match in conditional_mark Added conditional mark to skip if not ptf64 topo --- .../plugins/conditional_mark/__init__.py | 2 +- .../tests_mark_conditions.yaml | 8 +- tests/saitests/py3/sai_qos_tests.py | 187 +++++++++--------- 3 files changed, 100 insertions(+), 97 deletions(-) diff --git a/tests/common/plugins/conditional_mark/__init__.py b/tests/common/plugins/conditional_mark/__init__.py index 366d50cf8b6..cb7d6b57690 100644 --- a/tests/common/plugins/conditional_mark/__init__.py +++ b/tests/common/plugins/conditional_mark/__init__.py @@ -419,7 +419,7 @@ def find_longest_matches(nodeid, conditions): for condition in conditions: # condition is a dict which has only one item, so we use condition.keys()[0] to get its key. if nodeid.startswith(list(condition.keys())[0]): - length = len(condition) + length = len(list(condition.keys())[0]) if length > max_length: max_length = length longest_matches = [] diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 212a20c7297..e460647101f 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1048,7 +1048,7 @@ qos/test_qos_sai.py::TestQosSai: skip: reason: "Unsupported testbed type." 
conditions: - - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic', 'ptf64'] and asic_type not in ['mellanox']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testIPIPQosSaiDscpToPgMapping: skip: @@ -1098,6 +1098,12 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiDwrrWeightChange: conditions: - "asic_type in ['mellanox']" +qos/test_qos_sai.py::TestQosSai::testQosSaiFullMeshTrafficSanity: + skip: + reason: "Unsupported testbed type." + conditions: + - "topo_name not in ['ptf64'] and asic_type in ['cisco-8000']" + qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize: skip: reason: "Headroom pool size not supported." 
diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 4233c93de66..221adfc3edc 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -123,10 +123,10 @@ def get_ip_addr(): def get_tcp_port(): val = 1234 while True: - if val == 65535: - raise RuntimeError("We ran out of tcp ports!") - val = max(val, (val+10) % 65535) yield val + val += 10 + if val > 65534: + val = 1234 TCP_PORT_GEN = get_tcp_port() @@ -5745,111 +5745,108 @@ def config_traffic(self, dst_port_id, dscp, ecn_bit): ) def runTest(self): - try: - failed_pairs = set() - logging.info("Total traffic src_dst_pairs being tested {}".format( - len(self.src_port_ids)*len(self.dst_port_ids)) - ) - pkt_count = 10 - - # Split the src port list for concurrent pkt injection - num_splits = 2 - split_points = [i * len(self.src_port_ids) // num_splits for i in range(1, num_splits)] - parts = [self.src_port_ids[i:j] for i, j in zip([0] + split_points, split_points + [None])] - - def runTestPerSrcList(src_port_list, checkCounter=False): - for src_port_id in src_port_list: - logging.debug( - "Sending {} packets X {} flows with dscp/queue {}/{} from src {} -> dst {}".format( - pkt_count, - len(self.pkt[src_port_id]), dscp, queue, - self.all_port_id_to_name.get(src_port_id, 'Not Found'), - dst_port_name) - ) - if checkCounter: - port_cnt_base, q_cntrs_base = sai_thrift_read_port_counters( - self.dst_client, self.asic_type, - port_list['dst'][real_dst_port_id] - ) - - for pkt_tuple in self.pkt[src_port_id]: - logging.debug( - "Sending {} packets with dscp/queue {}/{} from src {} -> dst {} Pkt {}".format( - pkt_count, dscp, queue, - self.all_port_id_to_name.get(src_port_id, 'Not Found'), - dst_port_name, pkt_tuple[0]) - ) - send_packet(self, src_port_id, pkt_tuple[0], pkt_count) - - if checkCounter: - time.sleep(1) - port_cntrs, q_cntrs = sai_thrift_read_port_counters( - self.dst_client, self.asic_type, - port_list['dst'][real_dst_port_id] - ) - 
pkts_enqueued = q_cntrs[queue] - q_cntrs_base[queue] - if pkts_enqueued < self.flows_per_port*pkt_count: - logging.info("Faulty src/dst {}/{} pair on queue {}".format( - self.all_port_id_to_name.get(src_port_id, 'Not Found'), - dst_port_name, queue - )) - logging.info("q_cntrs_base {}".format(q_cntrs_base)) - logging.info("q_cntrs {}".format(q_cntrs)) - logging.info("port_cnt_base {}".format(port_cnt_base)) - logging.info("port_cntrs {}".format(port_cntrs)) - failed_pairs.add( - ( - self.all_port_id_to_name.get(src_port_id, 'Not Found'), - dst_port_name, queue - ) - ) - - def findFaultySrcDstPair(dscp, queue): - ecn_bit = 1 if queue in [3, 4] else 0 - self.config_traffic(real_dst_port_id, dscp, ecn_bit) - runTestPerSrcList(self.src_port_ids, True) - - for dst_port_id in self.dst_port_ids: - real_dst_port_id = dst_port_id - dst_port_name = self.all_port_id_to_name.get(real_dst_port_id, 'Not Found') - logging.info("Starting Test for dst {}".format(dst_port_name)) - dst_port_mac = self.all_port_id_to_mac[real_dst_port_id] - self.dst_port_mac = self.router_mac if self.router_mac != '' else dst_port_mac - self.dst_port_ip = self.all_port_id_to_ip[real_dst_port_id] - - for i, dscp in enumerate(self.dscps): - queue = self.queues[i] # Need queue for occupancy verification - ecn_bit = 1 if queue in [3, 4] else 0 - self.config_traffic(real_dst_port_id, dscp, ecn_bit) - + failed_pairs = set() + logging.info("Total traffic src_dst_pairs being tested {}".format( + len(self.src_port_ids)*len(self.dst_port_ids)) + ) + pkt_count = 10 + + # Split the src port list for concurrent pkt injection + num_splits = 2 + split_points = [i * len(self.src_port_ids) // num_splits for i in range(1, num_splits)] + parts = [self.src_port_ids[i:j] for i, j in zip([0] + split_points, split_points + [None])] + + def runTestPerSrcList(src_port_list, checkCounter=False): + for src_port_id in src_port_list: + logging.debug( + "Sending {} packets X {} flows with dscp/queue {}/{} from src {} -> dst 
{}".format( + pkt_count, + len(self.pkt[src_port_id]), dscp, queue, + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name) + ) + if checkCounter: port_cnt_base, q_cntrs_base = sai_thrift_read_port_counters( self.dst_client, self.asic_type, port_list['dst'][real_dst_port_id] ) - with concurrent.futures.ThreadPoolExecutor(max_workers=num_splits) as executor: - # Submit the tasks to the executor - futures = [executor.submit(runTestPerSrcList, part) for part in parts] - - # Wait for all tasks to complete - concurrent.futures.wait(futures) - + for pkt_tuple in self.pkt[src_port_id]: + logging.debug( + "Sending {} packets with dscp/queue {}/{} from src {} -> dst {} Pkt {}".format( + pkt_count, dscp, queue, + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name, pkt_tuple[0]) + ) + send_packet(self, src_port_id, pkt_tuple[0], pkt_count) + + if checkCounter: time.sleep(1) port_cntrs, q_cntrs = sai_thrift_read_port_counters( self.dst_client, self.asic_type, port_list['dst'][real_dst_port_id] ) pkts_enqueued = q_cntrs[queue] - q_cntrs_base[queue] - logging.info("Enqueued on queue {} pkts {}".format(queue, pkts_enqueued)) - if pkts_enqueued < self.flows_per_port*pkt_count*len(self.src_port_ids): + if pkts_enqueued < self.flows_per_port*pkt_count: + logging.info("Faulty src/dst {}/{} pair on queue {}".format( + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name, queue + )) logging.info("q_cntrs_base {}".format(q_cntrs_base)) logging.info("q_cntrs {}".format(q_cntrs)) logging.info("port_cnt_base {}".format(port_cnt_base)) logging.info("port_cntrs {}".format(port_cntrs)) - # Craft pkt for given queue and - # inject from each src to find which src/dst pair is dropping pkt - findFaultySrcDstPair(dscp, queue) + failed_pairs.add( + ( + self.all_port_id_to_name.get(src_port_id, 'Not Found'), + dst_port_name, queue + ) + ) + + def findFaultySrcDstPair(dscp, queue): + ecn_bit = 1 if queue in [3, 4] else 0 + 
self.config_traffic(real_dst_port_id, dscp, ecn_bit) + runTestPerSrcList(self.src_port_ids, True) + + for dst_port_id in self.dst_port_ids: + real_dst_port_id = dst_port_id + dst_port_name = self.all_port_id_to_name.get(real_dst_port_id, 'Not Found') + logging.info("Starting Test for dst {}".format(dst_port_name)) + dst_port_mac = self.all_port_id_to_mac[real_dst_port_id] + self.dst_port_mac = self.router_mac if self.router_mac != '' else dst_port_mac + self.dst_port_ip = self.all_port_id_to_ip[real_dst_port_id] + + for i, dscp in enumerate(self.dscps): + queue = self.queues[i] # Need queue for occupancy verification + ecn_bit = 1 if queue in [3, 4] else 0 + self.config_traffic(real_dst_port_id, dscp, ecn_bit) - assert len(failed_pairs) == 0, "Traffic failed between {}".format(failed_pairs) - finally: - pass + port_cnt_base, q_cntrs_base = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, + port_list['dst'][real_dst_port_id] + ) + + with concurrent.futures.ThreadPoolExecutor(max_workers=num_splits) as executor: + # Submit the tasks to the executor + futures = [executor.submit(runTestPerSrcList, part) for part in parts] + + # Wait for all tasks to complete + concurrent.futures.wait(futures) + + time.sleep(1) + port_cntrs, q_cntrs = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, + port_list['dst'][real_dst_port_id] + ) + pkts_enqueued = q_cntrs[queue] - q_cntrs_base[queue] + logging.info("Enqueued on queue {} pkts {}".format(queue, pkts_enqueued)) + if pkts_enqueued < self.flows_per_port*pkt_count*len(self.src_port_ids): + logging.info("q_cntrs_base {}".format(q_cntrs_base)) + logging.info("q_cntrs {}".format(q_cntrs)) + logging.info("port_cnt_base {}".format(port_cnt_base)) + logging.info("port_cntrs {}".format(port_cntrs)) + # Craft pkt for given queue and + # inject from each src to find which src/dst pair is dropping pkt + findFaultySrcDstPair(dscp, queue) + + assert len(failed_pairs) == 0, "Traffic failed between 
{}".format(failed_pairs) From d477e2c56d7f5ca945a4930819471f5ccc68e9bc Mon Sep 17 00:00:00 2001 From: Sreejith Sreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Fri, 15 Mar 2024 08:34:30 -0700 Subject: [PATCH 3/6] Made adding static routes a pytest fixture --- tests/qos/qos_sai_base.py | 62 +++++++++++++++++++++++++++++++++++++ tests/qos/test_qos_sai.py | 64 +++------------------------------------ 2 files changed, 67 insertions(+), 59 deletions(-) diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index cc0b3aa1872..2c0cf592723 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -2277,3 +2277,65 @@ def populate_arp_entries( self.runPtfTest( ptfhost, testCase=saiQosTest, testParams=testParams ) + + @pytest.fixture(scope="function", autouse=False) + def set_static_route_ptf64(self, dutConfig, get_src_dst_asic_and_duts, dutTestParams, enum_frontend_asic_index): + def generate_ip_address(base_ip, new_first_octet): + octets = base_ip.split('.') + if len(octets) != 4: + raise ValueError("Invalid IP address format") + octets[0] = str(new_first_octet) + octets[2] = octets[3] + octets[3] = '1' + return '.'.join(octets) + + def combine_ips(src_ips, dst_ips, new_first_octet): + combined_ips_map = {} + + for key, src_info in src_ips.items(): + src_ip = src_info['peer_addr'] + new_ip = generate_ip_address(src_ip, new_first_octet) + combined_ips_map[key] = {'original_ip': src_ip, 'generated_ip': new_ip} + + for key, dst_info in dst_ips.items(): + dst_ip = dst_info['peer_addr'] + new_ip = generate_ip_address(dst_ip, new_first_octet) + combined_ips_map[key] = {'original_ip': dst_ip, 'generated_ip': new_ip} + + return combined_ips_map + + def configRoutePrefix(add_route): + action = "add" if add_route else "del" + for port, entry in combined_ips_map.items(): + if enum_frontend_asic_index is None: + src_asic.shell("config route {} prefix {}.0/24 nexthop {}".format( + action, '.'.join(entry['generated_ip'].split('.')[:3]), 
entry['original_ip'])) + else: + src_asic.shell("ip netns exec asic{} config route {} prefix {}.0/24 nexthop {}".format( + enum_frontend_asic_index, + action, '.'.join(entry['generated_ip'].split('.')[:3]), + entry['original_ip']) + ) + + if dutTestParams["basicParams"]["sonic_asic_type"] != "cisco-8000": + pytest.skip("Traffic sanity test is not supported") + + if dutTestParams["topo"] != "ptf64": + pytest.skip("Test not supported in {} topology. Use ptf64 topo".format(dutTestParams["topo"])) + + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + src_asic = get_src_dst_asic_and_duts['src_asic'] + + src_testPortIps = dutConfig["testPortIps"][src_dut_index][src_asic_index] + dst_testPortIps = dutConfig["testPortIps"][dst_dut_index][dst_asic_index] + + new_first_octet = 100 + combined_ips_map = combine_ips(src_testPortIps, dst_testPortIps, new_first_octet) + + configRoutePrefix(True) + yield combined_ips_map + configRoutePrefix(False) + return diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index e282d5e9b36..1eba931e413 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -2257,8 +2257,8 @@ def testQosSaiLossyQueueVoqMultiSrc( def testQosSaiFullMeshTrafficSanity( self, ptfhost, dutTestParams, dutConfig, dutQosConfig, - get_src_dst_asic_and_duts, dut_qos_maps, duthosts, # noqa F811 - enum_frontend_dut_hostname, enum_frontend_asic_index + get_src_dst_asic_and_duts, dut_qos_maps, # noqa F811 + set_static_route_ptf64 ): """ Test QoS SAI traffic sanity @@ -2273,7 +2273,7 @@ def testQosSaiFullMeshTrafficSanity( Raises: RunAnsibleModuleFail if ptf test fails """ - # Concurrent execution with a specific set of dst port + # Execution with a specific set of dst port def run_test_for_dst_port(start, end): test_params = dict() 
test_params.update(dutTestParams["basicParams"]) @@ -2291,48 +2291,6 @@ def run_test_for_dst_port(start, end): self.runPtfTest(ptfhost, testCase="sai_qos_tests.FullMeshTrafficSanity", testParams=test_params) - def generate_ip_address(base_ip, new_first_octet): - octets = base_ip.split('.') - if len(octets) != 4: - raise ValueError("Invalid IP address format") - octets[0] = str(new_first_octet) - octets[2] = octets[3] - octets[3] = '1' - return '.'.join(octets) - - def combine_ips(src_ips, dst_ips, new_first_octet): - combined_ips_map = {} - - for key, src_info in src_ips.items(): - src_ip = src_info['peer_addr'] - new_ip = generate_ip_address(src_ip, new_first_octet) - combined_ips_map[key] = {'original_ip': src_ip, 'generated_ip': new_ip} - - for key, dst_info in dst_ips.items(): - dst_ip = dst_info['peer_addr'] - new_ip = generate_ip_address(dst_ip, new_first_octet) - combined_ips_map[key] = {'original_ip': dst_ip, 'generated_ip': new_ip} - - return combined_ips_map - - def set_static_route(combined_ips_map, add_route=True): - action = "add" if add_route else "del" - for port, entry in combined_ips_map.items(): - if enum_frontend_asic_index is None: - duthost.shell("config route {} prefix {}.0/24 nexthop {}".format( - action, '.'.join(entry['generated_ip'].split('.')[:3]), entry['original_ip'])) - else: - duthost.shell("ip netns exec asic{} config route {} prefix {}.0/24 nexthop {}".format( - enum_frontend_asic_index, - action, '.'.join(entry['generated_ip'].split('.')[:3]), - entry['original_ip']) - ) - - if dutTestParams["basicParams"]["sonic_asic_type"] != "cisco-8000": - pytest.skip("Traffic sanity test is not supported") - - duthost = duthosts[enum_frontend_dut_hostname] - src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] @@ -2341,9 +2299,6 @@ def set_static_route(combined_ips_map, add_route=True): src_testPortIps = 
dutConfig["testPortIps"][src_dut_index][src_asic_index] dst_testPortIps = dutConfig["testPortIps"][dst_dut_index][dst_asic_index] - new_first_octet = 100 - combined_ips_map = combine_ips(src_testPortIps, dst_testPortIps, new_first_octet) - # Fetch all port IDs and IPs all_src_port_id_to_ip = {port_id: src_testPortIps[port_id]['peer_addr'] for port_id in src_testPortIps.keys()} @@ -2353,7 +2308,7 @@ def set_static_route(combined_ips_map, add_route=True): } all_dst_port_id_to_ip = { - port_id: combined_ips_map[port_id]['generated_ip'] + port_id: set_static_route_ptf64[port_id]['generated_ip'] for port_id in dst_testPortIps.keys() } @@ -2370,14 +2325,7 @@ def set_static_route(combined_ips_map, add_route=True): "Need both TC_TO_PRIORITY_GROUP_MAP and DSCP_TO_TC_MAP" "and key AZURE to run this test.") - if (dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS or - dutTestParams["topo"] in self.SUPPORTED_T1_TOPOS or - dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS): - dscp_to_q_map = {tc_to_dscp_map[tc]: tc_to_q_map[tc] for tc in tc_to_dscp_map if tc != 7} - else: - pytest.skip("Test not supported in T2 topology") - - set_static_route(combined_ips_map, True) + dscp_to_q_map = {tc_to_dscp_map[tc]: tc_to_q_map[tc] for tc in tc_to_dscp_map if tc != 7} # Define the number of splits # for the dst port list @@ -2392,5 +2340,3 @@ def set_static_route(combined_ips_map, add_route=True): # Execute with one set of dst port at a time, avoids ptf run getting timed out for start, end in zip([0] + split_points, split_points + [len(all_keys)]): run_test_for_dst_port(start, end) - - set_static_route(combined_ips_map, False) From b53bf930d2c4cc479b71c7800819b1cd13cbfadb Mon Sep 17 00:00:00 2001 From: Sreejith Sreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:46:48 -0700 Subject: [PATCH 4/6] Added fixture to configure ip on ptf interfaces while using PTF64 topo --- tests/qos/qos_sai_base.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 
insertions(+), 1 deletion(-) diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 2c0cf592723..fb9c666892f 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -780,9 +780,32 @@ def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, "src_port_vlan": srcVlan } + @pytest.fixture(scope='class', autouse=False) + def configure_ip_on_ptf_intfs(self, ptfhost, get_src_dst_asic_and_duts, tbinfo): + src_dut = get_src_dst_asic_and_duts['src_dut'] + src_mgFacts = src_dut.get_extended_minigraph_facts(tbinfo) + topo = tbinfo["topo"]["name"] + + # if PTF64 and is Cisco, set IP address on eth interfaces of the ptf + if topo == 'ptf64' and is_cisco_device(src_dut): + minigraph_ip_interfaces = src_mgFacts['minigraph_interfaces'] + for entry in minigraph_ip_interfaces: + ptfhost.shell("ip addr add {}/31 dev eth{}".format( + entry['peer_addr'], src_mgFacts["minigraph_ptf_indices"][entry['attachto']]) + ) + yield + for entry in minigraph_ip_interfaces: + ptfhost.shell("ip addr del {}/31 dev eth{}".format( + entry['peer_addr'], src_mgFacts["minigraph_ptf_indices"][entry['attachto']]) + ) + return + else: + yield + return + @pytest.fixture(scope='class', autouse=True) def dutConfig( - self, request, duthosts, get_src_dst_asic_and_duts, + self, request, duthosts, configure_ip_on_ptf_intfs, get_src_dst_asic_and_duts, lower_tor_host, tbinfo, dualtor_ports_for_duts, dut_qos_maps): # noqa F811 """ Build DUT host config pertaining to QoS SAI tests From 2bf298754b10907d085b64c3a9a6770949019643 Mon Sep 17 00:00:00 2001 From: Sreejith Sreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Mon, 13 May 2024 02:22:25 -0700 Subject: [PATCH 5/6] Changed to skip for all platforms except Cisco --- .../plugins/conditional_mark/tests_mark_conditions.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml 
b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 83be9c87d6f..7ea20ed61d5 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1140,9 +1140,9 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiDwrrWeightChange: qos/test_qos_sai.py::TestQosSai::testQosSaiFullMeshTrafficSanity: skip: - reason: "Unsupported testbed type." + reason: "Unsupported platform or testbed type." conditions: - - "topo_name not in ['ptf64'] and asic_type in ['cisco-8000']" + - "asic_type not in ['cisco-8000'] or topo_name not in ['ptf64']" qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize: skip: From e45042dd203fe69d0addeddf8ddf471d6f23e4ad Mon Sep 17 00:00:00 2001 From: Sreejith Sreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Mon, 13 May 2024 04:05:16 -0700 Subject: [PATCH 6/6] flake8 issues reported from changes in PR 9896 --- tests/saitests/py3/sai_qos_tests.py | 62 ++++++++++++++--------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index ad25e9d081a..2ea7b71fb75 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -1644,7 +1644,7 @@ def runTest(self): 'xmit_counters {}\n\txmit_counters_base {}\n'.format( # noqa F523 test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port no pfc - assert(recv_counters[pg] == recv_counters_base[pg]), \ + assert (recv_counters[pg] == recv_counters_base[pg]), \ 'unexpectedly PFC counter increase, {}'.format(test_stage) # recv port no ingress drop # For dnx few extra ipv6 NS/RA pkt received from VM, adding to counter value @@ -1655,11 +1655,11 @@ def runTest(self): assert (recv_counters[cntr] <= recv_counters_base[cntr] + COUNTER_MARGIN),\ 'unexpectedly RX drop counter increase, {}'.format(test_stage) else: - 
assert(recv_counters[cntr] == recv_counters_base[cntr]),\ + assert (recv_counters[cntr] == recv_counters_base[cntr]),\ 'unexpectedly RX drop counter increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), \ + assert (xmit_counters[cntr] == xmit_counters_base[cntr]), \ 'unexpectedly TX drop counter increase, {}'.format(test_stage) # send 1 packet to trigger pfc @@ -1683,7 +1683,7 @@ def runTest(self): xmit_counters, xmit_counters_base)) # recv port pfc - assert(recv_counters[pg] > recv_counters_base[pg]), \ + assert (recv_counters[pg] > recv_counters_base[pg]), \ 'unexpectedly PFC counter not increase, {}'.format(test_stage) # recv port no ingress drop # For dnx few extra ipv6 NS/RA pkt received from VM, adding to counter value @@ -1694,11 +1694,11 @@ def runTest(self): assert (recv_counters[cntr] <= recv_counters_base[cntr] + COUNTER_MARGIN),\ 'unexpectedly RX drop counter increase, {}'.format(test_stage) else: - assert(recv_counters[cntr] == recv_counters_base[cntr]),\ + assert (recv_counters[cntr] == recv_counters_base[cntr]),\ 'unexpectedly RX drop counter increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), \ + assert (xmit_counters[cntr] == xmit_counters_base[cntr]), \ 'unexpectedly TX drop counter increase, {}'.format(test_stage) # send packets short of ingress drop @@ -1718,7 +1718,7 @@ def runTest(self): 'xmit_counters {}\n\txmit_counters_base {}\n'.format( # noqa F841 test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port pfc - assert(recv_counters[pg] > recv_counters_base[pg]), \ + assert (recv_counters[pg] > recv_counters_base[pg]), \ 'unexpectedly PFC counter not increase, {}'.format(test_stage) # recv port no ingress drop # For dnx few extra ipv6 NS/RA pkt received from VM, adding to counter value @@ -1729,11 +1729,11 @@ def 
runTest(self): assert (recv_counters[cntr] <= recv_counters_base[cntr] + COUNTER_MARGIN),\ 'unexpectedly RX drop counter increase, {}'.format(test_stage) else: - assert(recv_counters[cntr] == recv_counters_base[cntr]),\ + assert (recv_counters[cntr] == recv_counters_base[cntr]),\ 'unexpectedly RX drop counter increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), \ + assert (xmit_counters[cntr] == xmit_counters_base[cntr]), \ 'unexpectedly TX drop counter increase, {}'.format(test_stage) # send 1 packet to trigger ingress drop @@ -1752,21 +1752,21 @@ def runTest(self): 'xmit_counters {}\n\txmit_counters_base {}\n'.format( # noqa F841 test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port pfc - assert(recv_counters[pg] > recv_counters_base[pg]), \ + assert (recv_counters[pg] > recv_counters_base[pg]), \ 'unexpectedly PFC counter not increase, {}'.format(test_stage) # recv port ingress drop if self.hwsku not in ['Cisco-8800-LC-48H-C48']: for cntr in ingress_counters: if platform_asic and platform_asic == "broadcom-dnx": if cntr == 1: - assert(recv_counters[cntr] > recv_counters_base[cntr]), \ + assert (recv_counters[cntr] > recv_counters_base[cntr]), \ 'unexpectedly RX drop counter not increase, {}'.format(test_stage) else: - assert(recv_counters[cntr] > recv_counters_base[cntr]), 'unexpectedly RX drop counter' \ + assert (recv_counters[cntr] > recv_counters_base[cntr]), 'unexpectedly RX drop counter' \ ' not increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]),\ + assert (xmit_counters[cntr] == xmit_counters_base[cntr]),\ 'unexpectedly TX drop counter increase, {}'.format(test_stage) if '201811' not in sonic_version and 'mellanox' in asic_type: @@ -2519,7 +2519,7 @@ def runTest(self): 'unexpectedly ingress drop on recv port (counter: {}), at 
step {} {}'.format( port_counter_fields[cntr], step_id, step_desc) else: - assert(recv_counters[cntr] == recv_counters_base[cntr]),\ + assert (recv_counters[cntr] == recv_counters_base[cntr]),\ 'unexpectedly ingress drop on recv port (counter: {}), at step {} {}'.format( port_counter_fields[cntr], step_id, step_desc) # xmit port no egress drop @@ -2633,7 +2633,7 @@ def runTest(self): sys.stderr.write('{}\n'.format(port_cnt_tbl)) # recv port no pfc - assert( + assert ( recv_counters[pg] == recv_counters_base[pg] ), 'unexpectedly trigger PFC for PG {} (counter: {}), at step {} {}'.format( pg, port_counter_fields[pg], step_id, step_desc) @@ -2959,7 +2959,7 @@ def runTest(self): if self.platform_asic and self.platform_asic == "broadcom-dnx": self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, self.uniq_dst_ports) sys.exit("Too many pkts needed to trigger pfc: %d" % (pkt_cnt)) - assert(recv_counters[sidx_dscp_pg_tuples[i][2]] > + assert (recv_counters[sidx_dscp_pg_tuples[i][2]] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) print("%d packets for sid: %d, pg: %d to trigger pfc" % ( pkt_cnt, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], sidx_dscp_pg_tuples[i][2] - 2), @@ -3036,8 +3036,8 @@ def runTest(self): print("pkts sent: %d, lower bound: %d, actual headroom pool watermark: %d, upper_bound: %d" % ( wm_pkt_num, expected_wm, hdrm_pool_wm, upper_bound_wm), file=sys.stderr) if 'innovium' not in self.asic_type: - assert(expected_wm <= hdrm_pool_wm) - assert(hdrm_pool_wm <= upper_bound_wm) + assert (expected_wm <= hdrm_pool_wm) + assert (hdrm_pool_wm <= upper_bound_wm) if self.platform_asic and self.platform_asic == "broadcom-dnx": time.sleep(8) for i in range(0, self.pgs_num): @@ -3080,7 +3080,7 @@ def runTest(self): else: # assert ingress drop for cntr in self.ingress_counters: - assert(recv_counters[cntr] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) + assert (recv_counters[cntr] > 
recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) # assert no egress drop at the dut xmit port if self.platform_asic != "broadcom-dnx": @@ -3101,14 +3101,14 @@ def runTest(self): sys.stderr.write('After PG headroom filled, actual headroom pool watermark {}, upper_bound {}\n'.format( hdrm_pool_wm, upper_bound_wm)) if 'innovium' not in self.asic_type: - assert(expected_wm <= hdrm_pool_wm) - assert(hdrm_pool_wm <= upper_bound_wm) + assert (expected_wm <= hdrm_pool_wm) + assert (hdrm_pool_wm <= upper_bound_wm) # at this point headroom pool should be full. send few more packets to continue causing drops print("overflow headroom pool", file=sys.stderr) send_packet(self, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, 10) hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark( self.src_client, self.buf_pool_roid) - assert(hdrm_pool_wm <= self.max_headroom) + assert (hdrm_pool_wm <= self.max_headroom) sys.stderr.flush() finally: @@ -3814,9 +3814,9 @@ def runTest(self): if cntr == 1: print("recv_counters_base: %d, recv_counters: %d" % (recv_counters_base[cntr], recv_counters[cntr]), file=sys.stderr) - assert(recv_counters[cntr] <= recv_counters_base[cntr] + COUNTER_MARGIN) + assert (recv_counters[cntr] <= recv_counters_base[cntr] + COUNTER_MARGIN) else: - assert(recv_counters[cntr] == recv_counters_base[cntr]) + assert (recv_counters[cntr] == recv_counters_base[cntr]) # xmit port no egress drop for cntr in egress_counters: assert (xmit_counters[cntr] == xmit_counters_base[cntr]) @@ -4882,7 +4882,7 @@ def runTest(self): logging.info("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + "stat - so ignoring this step for now") else: - assert(q_wm_res[queue] <= 1 * cell_size) + assert (q_wm_res[queue] <= 1 * cell_size) # send packet batch of fixed packet numbers to fill queue shared # first round sends only 1 packet @@ -4906,7 +4906,7 @@ def runTest(self): if 'cisco-8000' in asic_type: self.sai_thrift_port_tx_disable(self.dst_client, 
asic_type, [dst_port_id]) - assert(fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) + assert (fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) pkts_total += pkts_num pkts_num = pkts_total - 1 @@ -4921,7 +4921,7 @@ def runTest(self): time.sleep(8) - if( + if ( que_min_pkts_num == 0 and pkts_num <= 1 + margin and check_leackout_compensation_support(asic_type, hwsku) @@ -5194,9 +5194,9 @@ def runTest(self): ), file=sys.stderr, ) - assert(buffer_pool_wm <= (expected_wm + - upper_bound_margin) * cell_size) - assert((expected_wm - lower_bound_margin) + assert (buffer_pool_wm <= (expected_wm + + upper_bound_margin) * cell_size) + assert ((expected_wm - lower_bound_margin) * cell_size <= buffer_pool_wm) pkts_num = pkts_inc @@ -5391,7 +5391,7 @@ def runTest(self): if 'cisco-8000' in asic_type: # Queue is always the inner_dscp due to the TC_TO_QUEUE_MAP redirection queue = inner_dscp - assert(fill_leakout_plus_one(self, src_port_id, + assert (fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) num_pkts = pkts_num_trig_pfc - pkts_num_margin - 1 send_packet(self, src_port_id, pkt, num_pkts)