diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py
index eec71067415..5f16c25ff32 100644
--- a/tests/qos/qos_sai_base.py
+++ b/tests/qos/qos_sai_base.py
@@ -711,7 +711,7 @@ def dutConfig(
 
         # LAG ports in T1 TOPO need to be removed in Mellanox devices
-        if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(src_dut):
+        if topo in self.SUPPORTED_T0_TOPOS or (topo in self.SUPPORTED_PTF_TOPOS and isMellanoxDevice(src_dut)):
             # Only single asic is supported for this scenario, so use src_dut and src_asic - which will be the same
             # as dst_dut and dst_asic
             pytest_assert(
@@ -817,6 +817,10 @@ def dutConfig(
                 testPortIds[src_dut_index][dut_asic.asic_index] = sorted(
                     dutPortIps[src_dut_index][dut_asic.asic_index].keys())
 
+            if isMellanoxDevice(src_dut):
+                testPortIds[src_dut_index][dut_asic.asic_index] = self.select_port_ids_for_mellanox_device(
+                    src_dut, src_mgFacts, testPortIds[src_dut_index][dut_asic.asic_index])
+
         # Need to fix this
         testPortIps[src_dut_index] = {}
         testPortIps[src_dut_index][src_asic_index] = self.__assignTestPortIps(src_mgFacts)
@@ -2017,3 +2021,36 @@ def skip_check_for_hbm_either_asic(self, get_src_dst_asic_and_duts):
                 "This test needs to be revisited for HBM enabled systems.")
         yield
         return
+
+    def select_port_ids_for_mellanox_device(self, duthost, mgFacts, testPortIds):
+        """
+        For Nvidia devices, the tested ports must have the same cable length and speed.
+        Firstly, categorize the ports by the same cable length and speed.
+        Secondly, select the port group with the largest number of ports as test ports from the above results.
+        """
+        ptf_port_dut_port_dict = dict(zip(mgFacts["minigraph_ptf_indices"].values(),
+                                          mgFacts["minigraph_ptf_indices"].keys()))
+        get_interface_cable_length_info = 'redis-cli -n 4 hgetall "CABLE_LENGTH|AZURE"'
+        # hgetall output is an alternating field/value line list: [port, length, port, length, ...]
+        interface_cable_length_list = duthost.shell(get_interface_cable_length_info)['stdout_lines']
+        interface_status = duthost.show_interface(command="status")["ansible_facts"]['int_status']
+
+        cable_length_speed_interface_dict = {}
+        for ptf_port in testPortIds:
+            dut_port = ptf_port_dut_port_dict[ptf_port]
+            if dut_port in interface_cable_length_list:
+                cable_length = interface_cable_length_list[interface_cable_length_list.index(dut_port) + 1]
+                speed = interface_status[dut_port]['speed']
+                cable_length_speed = "{}_{}".format(cable_length, speed)
+                if cable_length_speed in cable_length_speed_interface_dict:
+                    cable_length_speed_interface_dict[cable_length_speed].append(ptf_port)
+                else:
+                    cable_length_speed_interface_dict[cable_length_speed] = [ptf_port]
+        max_port_num = 0
+        test_port_ids = []
+        # Find the port group with the largest number of ports as test ports
+        for _, port_list in cable_length_speed_interface_dict.items():
+            if max_port_num < len(port_list):
+                test_port_ids = port_list
+                max_port_num = len(port_list)
+        logger.info("Test port ids is {}".format(test_port_ids))
+        return test_port_ids
diff --git a/tests/qos/test_tunnel_qos_remap.py b/tests/qos/test_tunnel_qos_remap.py
index 65938fd7841..22821780ec8 100644
--- a/tests/qos/test_tunnel_qos_remap.py
+++ b/tests/qos/test_tunnel_qos_remap.py
@@ -659,7 +659,7 @@ def test_pfc_watermark_extra_lossless_active(ptfhost, fanouthosts, rand_selected
 
 
 @pytest.mark.disable_loganalyzer
-def test_tunnel_decap_dscp_to_pg_mapping(rand_selected_dut, ptfhost, dut_config, setup_module, tunnel_qos_maps):    # noqa F811
+def test_tunnel_decap_dscp_to_pg_mapping(rand_selected_dut, ptfhost, dut_config, setup_module, tunnel_qos_maps, creds):    # noqa F811
     """
     Test steps:
     1. Toggle all ports to active on randomly selected ToR
@@ -698,7 +698,9 @@ def test_tunnel_decap_dscp_to_pg_mapping(rand_selected_dut, ptfhost, dut_config,
         "sonic_asic_type": dut_config["asic_type"],
         "platform_asic": dut_config["platform_asic"],
         "packet_size": packet_size,
-        "cell_size": cell_size
+        "cell_size": cell_size,
+        "dut_username": creds['sonicadmin_user'],
+        "dut_password": creds['sonicadmin_password']
     })
 
     run_ptf_test(
@@ -710,7 +712,7 @@ def test_tunnel_decap_dscp_to_pg_mapping(rand_selected_dut, ptfhost, dut_config,
 
 @pytest.mark.disable_loganalyzer
 @pytest.mark.parametrize("xoff_profile", ["pcbb_xoff_1", "pcbb_xoff_2", "pcbb_xoff_3", "pcbb_xoff_4"])
-def test_xoff_for_pcbb(rand_selected_dut, ptfhost, dut_config, qos_config, xoff_profile, setup_module):
+def test_xoff_for_pcbb(rand_selected_dut, ptfhost, dut_config, qos_config, xoff_profile, setup_module, creds):
     """
     The test is to verify xoff threshold for PCBB (Priority Control for Bounced Back traffic)
     Test steps
@@ -736,6 +738,8 @@ def test_xoff_for_pcbb(rand_selected_dut, ptfhost, dut_config, qos_config, xoff_
         "port_map_file_ini": dut_config["port_map_file_ini"],
         "platform_asic": dut_config["platform_asic"],
         "sonic_asic_type": dut_config["asic_type"],
+        "dut_username": creds['sonicadmin_user'],
+        "dut_password": creds['sonicadmin_password']
     })
     if dut_config["asic_type"] == 'mellanox':
         test_params.update({'cell_size': 144, 'packet_size': 300})
diff --git a/tests/saitests/py3/sai_base_test.py b/tests/saitests/py3/sai_base_test.py
index 552eabdead5..a6a21eaad88 100644
--- a/tests/saitests/py3/sai_base_test.py
+++ b/tests/saitests/py3/sai_base_test.py
@@ -35,6 +35,17 @@ from switch import (sai_thrift_port_tx_enable,
                     sai_thrift_port_tx_disable)
 
+DATA_PLANE_QUEUE_LIST = ["0", "1", "2", "3", "4", "5", "6", "7"]
+DEFAULT_QUEUE_SCHEDULER_CONFIG = {"0": "scheduler.0",
+                                  "1": "scheduler.0",
+                                  "2": "scheduler.0",
+                                  "3": "scheduler.1",
+                                  "4": "scheduler.1",
+                                  "5": "scheduler.0",
+                                  "6": "scheduler.0",
+                                  "7": ""}
+
+
class ThriftInterface(BaseTest): def setUp(self): @@ -61,6 +72,7 @@ def setUp(self): self.dst_server_ip = self.src_server_ip dst_server_port = src_server_port self.server = self.dst_server_ip + self.original_dut_port_queue_scheduler_map = {} if "port_map_file" in self.test_params: user_input = self.test_params['port_map_file'] @@ -172,7 +184,10 @@ def sai_thrift_port_tx_enable(self, client, asic_type, port_list, target='dst', assert 'Success rv = 0' in stdOut[1], "enable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.src_asic_index, self.src_server_ip) - sai_thrift_port_tx_enable(client, asic_type, port_list, target=target) + if asic_type == 'mellanox': + self.enable_mellanox_egress_data_plane(port_list) + else: + sai_thrift_port_tx_enable(client, asic_type, port_list, target=target) def sai_thrift_port_tx_disable(self, client, asic_type, port_list, target='dst'): if self.platform_asic and self.platform_asic == "broadcom-dnx": @@ -184,7 +199,53 @@ def sai_thrift_port_tx_disable(self, client, asic_type, port_list, target='dst') cmd) assert 'Success rv = 0' in stdOut[1], "disable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.src_asic_index, self.src_server_ip) - sai_thrift_port_tx_disable(client, asic_type, port_list, target=target) + if asic_type == 'mellanox': + self.disable_mellanox_egress_data_plane(port_list) + else: + sai_thrift_port_tx_disable(client, asic_type, port_list, target=target) + + def disable_mellanox_egress_data_plane(self, ptf_port_list): + self.original_dut_port_queue_scheduler_map = self.get_queue_scheduler_name(ptf_port_list) + block_data_plane_scheduler_name = 'scheduler.block_data_plane' + cmd_set_block_data_plane_scheduler = \ + f'sonic-db-cli CONFIG_DB hset "SCHEDULER|{block_data_plane_scheduler_name}" "type" DWRR "weight" 15 "pir" 1' + + self.exec_cmd_on_dut(self.server, self.test_params['dut_username'], self.test_params['dut_password'], + cmd_set_block_data_plane_scheduler) + for ptf_port in ptf_port_list: + dut_port = 
interface_to_front_mapping['dst'][str(ptf_port)] + for q in DATA_PLANE_QUEUE_LIST: + cmd_block_q = \ + f" sonic-db-cli CONFIG_DB hset 'QUEUE|{dut_port}|{q}' scheduler {block_data_plane_scheduler_name}" + self.exec_cmd_on_dut( + self.server, self.test_params['dut_username'], self.test_params['dut_password'], cmd_block_q) + + def get_queue_scheduler_name(self, ptf_port_list): + dut_port_queue_scheduler_map = {} + for ptf_port in ptf_port_list: + dut_port = interface_to_front_mapping['dst'][str(ptf_port)] + dut_port_queue_scheduler_map[dut_port] = {} + for q in DATA_PLANE_QUEUE_LIST: + cmd_get_q_scheduler_name = f"sonic-db-cli CONFIG_DB hget 'QUEUE|{dut_port}|{q}' scheduler" + scheduler_name, _, _ = self.exec_cmd_on_dut( + self.server, self.test_params['dut_username'], + self.test_params['dut_password'], cmd_get_q_scheduler_name) + scheduler_name = scheduler_name[0].strip("\n") + dut_port_queue_scheduler_map[dut_port][q] = scheduler_name + return dut_port_queue_scheduler_map + + def enable_mellanox_egress_data_plane(self, ptf_port_list): + for ptf_port in ptf_port_list: + dut_port = interface_to_front_mapping['dst'][str(ptf_port)] + for q in DATA_PLANE_QUEUE_LIST: + scheduler_name = self.original_dut_port_queue_scheduler_map[dut_port][q] if \ + self.original_dut_port_queue_scheduler_map else DEFAULT_QUEUE_SCHEDULER_CONFIG[q] + cmd_set_q_scheduler = f" sonic-db-cli CONFIG_DB hset 'QUEUE|{dut_port}|{q}' scheduler {scheduler_name}" + cmd_del_q_scheduler = f" sonic-db-cli CONFIG_DB hdel 'QUEUE|{dut_port}|{q}' scheduler " + cmd_recover_q_scheduler_config = cmd_set_q_scheduler if scheduler_name else cmd_del_q_scheduler + self.exec_cmd_on_dut( + self.server, self.test_params['dut_username'], + self.test_params['dut_password'], cmd_recover_q_scheduler_config) class ThriftInterfaceDataPlane(ThriftInterface): diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 8cbdf1f6f26..ac668751aa2 100644 --- a/tests/saitests/py3/sai_qos_tests.py +++ 
b/tests/saitests/py3/sai_qos_tests.py @@ -4265,7 +4265,7 @@ def runTest(self): if pkts_num_fill_min: assert(q_wm_res[queue] == 0) - elif 'cisco-8000' in asic_type: + elif 'cisco-8000' in asic_type or "ACS-SN5600" in hwsku: assert(q_wm_res[queue] <= (margin + 1) * cell_size) else: if platform_asic and platform_asic == "broadcom-dnx": @@ -4747,8 +4747,13 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num_trig_pfc // cell_occupancy - 1 - pkts_num_margin) time.sleep(8) # Read TX_OK again to calculate leaked packet number - tx_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) - leaked_packet_number = tx_counters[TRANSMITTED_PKTS] - tx_counters_base[TRANSMITTED_PKTS] + if 'mellanox' == asic_type: + # There are not leaked packets on Nvidia dualtor devices + leaked_packet_number = 0 + else: + tx_counters, _ = sai_thrift_read_port_counters(self.dst_client, + asic_type, port_list['dst'][dst_port_id]) + leaked_packet_number = tx_counters[TRANSMITTED_PKTS] - tx_counters_base[TRANSMITTED_PKTS] # Send packets to compensate the leaked packets send_packet(self, src_port_id, pkt, leaked_packet_number) time.sleep(8)