diff --git a/tests/qos/files/mellanox/qos_param_generator.py b/tests/qos/files/mellanox/qos_param_generator.py
index 87c0db50a6a..1530d12ae88 100644
--- a/tests/qos/files/mellanox/qos_param_generator.py
+++ b/tests/qos/files/mellanox/qos_param_generator.py
@@ -236,6 +236,11 @@ def calculate_parameters(self):
         self.qos_params_mlnx['wm_pg_shared_lossy'].update(wm_shared_lossy)
         wm_shared_lossy["pkts_num_margin"] = 8
         self.qos_params_mlnx['wm_q_shared_lossy'].update(wm_shared_lossy)
+        if 'lossy_dscp' in self.egressLossyProfile:
+            lossy_queue['dscp'] = self.egressLossyProfile['lossy_dscp']
+            self.qos_params_mlnx['wm_pg_shared_lossy']['dscp'] = self.egressLossyProfile['lossy_dscp']
+            self.qos_params_mlnx['wm_q_shared_lossy']['dscp'] = self.egressLossyProfile['lossy_dscp']
+            self.qos_params_mlnx['wm_q_shared_lossy']['queue'] = self.egressLossyProfile['lossy_queue']
 
         wm_buf_pool_lossless = self.qos_params_mlnx['wm_buf_pool_lossless']
         wm_buf_pool_lossless['pkts_num_trig_pfc'] = pkts_num_trig_pfc
diff --git a/tests/qos/files/mellanox/special_qos_config.yml b/tests/qos/files/mellanox/special_qos_config.yml
index 04a16ebf856..f0594014d39 100644
--- a/tests/qos/files/mellanox/special_qos_config.yml
+++ b/tests/qos/files/mellanox/special_qos_config.yml
@@ -27,12 +27,12 @@ qos_params:
       xon_4:
         packet_size: 800
       lossy_queue_1:
-        packet_size: 800
+        packet_size: 1200
       wm_pg_shared_lossless:
         packet_size: 800
         pkts_num_margin: 7
       wm_pg_shared_lossy:
-        packet_size: 800
-        pkts_num_margin: 5
+        packet_size: 1200
+        pkts_num_margin: 8
       wm_q_shared_lossy:
-        packet_size: 800
+        packet_size: 1200
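Reviewer note on the two hunks above: the new `lossy_dscp` branch in `qos_param_generator.py` only fires when the `egressLossyProfile` fixture attaches `lossy_dscp`/`lossy_queue` keys (see the `qos_sai_base.py` hunks below). A minimal sketch of the override flow, using simplified stand-in dicts rather than the real fixture objects:

```python
# Sketch: how the optional lossy_dscp/lossy_queue keys override the generated
# Mellanox QoS parameters. These dicts are simplified stand-ins, not the real ones.
qos_params_mlnx = {
    'lossy_queue_1': {'dscp': '8', 'packet_size': 1200},
    'wm_pg_shared_lossy': {'dscp': '8', 'pkts_num_margin': 8},
    'wm_q_shared_lossy': {'queue': '2', 'pkts_num_margin': 8},
}
# What egressLossyProfile yields for a Mellanox port with 0m cable length:
egress_lossy_profile = {'lossy_dscp': '11', 'lossy_queue': '4'}

if 'lossy_dscp' in egress_lossy_profile:
    dscp = egress_lossy_profile['lossy_dscp']
    qos_params_mlnx['lossy_queue_1']['dscp'] = dscp        # 'lossy_queue' in the diff
    qos_params_mlnx['wm_pg_shared_lossy']['dscp'] = dscp
    qos_params_mlnx['wm_q_shared_lossy']['dscp'] = dscp
    qos_params_mlnx['wm_q_shared_lossy']['queue'] = egress_lossy_profile['lossy_queue']

assert qos_params_mlnx['wm_q_shared_lossy'] == {'queue': '4', 'pkts_num_margin': 8, 'dscp': '11'}
```

The `wm_q_shared_lossy` entry gets both a new DSCP and a new queue, presumably because on a 0m-cable port the watermark test must target one of the lossy-only queue tables instead of the default 0-2 range.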
diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py
index 0e7c81096d2..4ccc32d6377 100644
--- a/tests/qos/qos_sai_base.py
+++ b/tests/qos/qos_sai_base.py
@@ -346,8 +346,24 @@ def __getBufferProfile(self, request, dut_asic, os_version, table, port, priorit
             else:
                 bufferProfileName = out.translate({ord(i): None for i in '[]'})
         else:
-            bufferProfileName = bufkeystr + dut_asic.run_redis_cmd(
-                argv=["redis-cli", "-n", db, "HGET", keystr, "profile"])[0]
+            profile_content = dut_asic.run_redis_cmd(argv=["redis-cli", "-n", db, "HGET", keystr, "profile"])
+            if profile_content:
+                bufferProfileName = bufkeystr + profile_content[0]
+            else:
+                logger.info("No lossless buffer. To be compatible with the existing case, return a dummy buffer profile")
+                dummy_buffer_profile = {
+                    "profileName": f"{bufkeystr}pg_lossless_0_0m_profile",
+                    "pool": "ingress_lossless_pool",
+                    "xon": "0",
+                    "xoff": "0",
+                    "size": "0",
+                    "dynamic_th": "0",
+                    "pg_q_alpha": "0",
+                    "port_alpha": "0",
+                    "pool_size": "0",
+                    "static_th": "0"
+                }
+                return dummy_buffer_profile
 
         result = dut_asic.run_redis_cmd(
             argv=["redis-cli", "-n", db, "HGETALL", bufferProfileName]
@@ -1781,10 +1797,13 @@ def releaseAllPorts(
             Raises:
                 RunAnsibleModuleFail if ptf test fails
         """
-        self.runPtfTest(
-            ptfhost, testCase="sai_qos_tests.ReleaseAllPorts",
-            testParams=dutTestParams["basicParams"]
-        )
+        if isMellanoxDevice(duthosts[0]):
+            logger.info("skip ReleaseAllPorts fixture for mellanox device")
+        else:
+            self.runPtfTest(
+                ptfhost, testCase="sai_qos_tests.ReleaseAllPorts",
+                testParams=dutTestParams["basicParams"]
+            )
 
     def __loadSwssConfig(self, duthost):
         """
@@ -2107,12 +2126,24 @@ def egressLossyProfile(
 
         srcport = dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]]
 
+        is_lossy_queue_only = False
+
         if srcport in dualtor_ports_for_duts:
             queues = "0-1"
         else:
-            queues = "0-2"
+            if isMellanoxDevice(duthost):
+                cable_len = dut_asic.shell(f"redis-cli -n 4 hget 'CABLE_LENGTH|AZURE' {srcport}")['stdout']
+                if cable_len == '0m':
+                    is_lossy_queue_only = True
+                    logger.info(f"{srcport} has only lossy queue")
+            if is_lossy_queue_only:
+                queue_table_postfix_list = ['1-3', '4', '5']
+                queue_to_dscp_map = {'1-3': '1', '4': '11', '5': '31'}
+                queues = random.choice(queue_table_postfix_list)
+            else:
+                queues = "0-2"
 
-        yield self.__getBufferProfile(
+        egress_lossy_profile = self.__getBufferProfile(
             request,
             dut_asic,
             duthost.os_version,
@@ -2121,6 +2152,12 @@ def egressLossyProfile(
             srcport,
             queues
         )
+        if is_lossy_queue_only:
+            egress_lossy_profile['lossy_dscp'] = queue_to_dscp_map[queues]
+            egress_lossy_profile['lossy_queue'] = '1' if queues == '1-3' else queues
+        logger.info(f"queues:{queues}, egressLossyProfile: {egress_lossy_profile}")
+
+        yield egress_lossy_profile
 
     @pytest.fixture(scope='class')
     def losslessSchedProfile(
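Reviewer note: `__getBufferProfile` now returns an all-zero dummy profile when the PG key has no `profile` field, so existing callers that index into the returned dict keep working on ports without lossless PGs. The 0m-cable detection in `egressLossyProfile` can also be read in isolation; a self-contained sketch, where `run_cmd` is a hypothetical stand-in for `dut_asic.shell(...)['stdout']`:

```python
import random

def pick_lossy_queues(srcport, run_cmd):
    """Sketch of the fixture's queue selection for Mellanox 0m-cable ports.

    run_cmd is a hypothetical stand-in for dut_asic.shell; it must return the
    cable-length string stored in CONFIG_DB (e.g. '0m', '40m').
    Returns (queue table postfix, DSCP override or None).
    """
    cable_len = run_cmd(f"redis-cli -n 4 hget 'CABLE_LENGTH|AZURE' {srcport}")
    if cable_len == '0m':
        # No lossless PGs: only lossy queue tables exist, so pick one at random.
        queue_to_dscp_map = {'1-3': '1', '4': '11', '5': '31'}
        queues = random.choice(list(queue_to_dscp_map))
        return queues, queue_to_dscp_map[queues]
    return '0-2', None  # pre-change default lossy range, no DSCP override
```

Returning `('0-2', None)` on the non-0m path preserves the behavior existing setups already rely on.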
diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py
index f72a5c96e83..166674e5efa 100644
--- a/tests/qos/test_qos_sai.py
+++ b/tests/qos/test_qos_sai.py
@@ -175,6 +175,11 @@ def check_skip_xon_hysteresis_test(xonHysteresisKey, dutQosConfig,
                     " Pls see qos.yaml for the port idx's that are needed.")
 
 
+def skip_test_on_no_lossless_pg(portSpeedCableLength):
+    if portSpeedCableLength == "0_0m":
+        pytest.skip("skip the test due to no lossless buffer PG")
+
+
 class TestQosSai(QosSaiBase):
     """TestQosSai derives from QosSaiBase and contains collection of QoS SAI test cases.
 
@@ -411,6 +416,7 @@ def testQosSaiPfcXoffLimit(
                 "Additional DSCPs are not supported on non-dual ToR ports")
 
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if dutTestParams['hwsku'] in self.BREAKOUT_SKUS and 'backend' not in dutTestParams['topo']:
             qosConfig = dutQosConfig["param"][portSpeedCableLength]["breakout"]
         else:
@@ -508,6 +514,7 @@ def testPfcStormWithSharedHeadroomOccupancy(
             pytest.skip("Shared Headroom has to be enabled for this test")
 
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if xonProfile in list(dutQosConfig["param"][portSpeedCableLength].keys()):
            qosConfig = dutQosConfig["param"][portSpeedCableLength]
         else:
@@ -681,6 +688,7 @@ def testQosSaiPfcXonLimit(
                 "Additional DSCPs are not supported on non-dual ToR ports")
 
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if xonProfile in list(dutQosConfig["param"][portSpeedCableLength].keys()):
             qosConfig = dutQosConfig["param"][portSpeedCableLength]
         else:
@@ -858,6 +866,7 @@ def testQosSaiHeadroomPoolSize(
         """
 
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         qosConfig = dutQosConfig["param"][portSpeedCableLength]
         testPortIps = dutConfig["testPortIps"]
 
@@ -1616,6 +1625,7 @@ def testQosSaiDwrr(
             RunAnsibleModuleFail if ptf test fails
         """
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         qosConfig = dutQosConfig["param"]
         if "wrr" in qosConfig[portSpeedCableLength]:
             qosConfigWrr = qosConfig[portSpeedCableLength]["wrr"]
@@ -1695,6 +1705,9 @@ def testQosSaiPgSharedWatermark(
         """
 
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        if pgProfile == "wm_pg_shared_lossless":
+            skip_test_on_no_lossless_pg(portSpeedCableLength)
+
         if pgProfile in list(dutQosConfig["param"][portSpeedCableLength].keys()):
             qosConfig = dutQosConfig["param"][portSpeedCableLength]
         else:
@@ -1798,6 +1811,7 @@ def testQosSaiPgHeadroomWatermark(
             RunAnsibleModuleFail if ptf test fails
         """
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if dutTestParams['hwsku'] in self.BREAKOUT_SKUS and 'backend' not in dutTestParams['topo']:
             qosConfig = dutQosConfig["param"][portSpeedCableLength]["breakout"]
         else:
@@ -1910,6 +1924,8 @@ def testQosSaiQSharedWatermark(
             RunAnsibleModuleFail if ptf test fails
         """
         portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        if queueProfile == "wm_q_shared_lossless":
+            skip_test_on_no_lossless_pg(portSpeedCableLength)
 
         if queueProfile == "wm_q_shared_lossless":
             if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000':
@@ -1972,7 +1988,7 @@ def testQosSaiDscpToPgMapping(
             self, get_src_dst_asic_and_duts, duthost, request, ptfhost,
             dutTestParams, dutConfig, dut_qos_maps,  # noqa F811
-            change_lag_lacp_timer):
+            change_lag_lacp_timer, dutQosConfig):
         """
             Test QoS SAI DSCP to PG mapping ptf test
@@ -1990,6 +2006,8 @@ def testQosSaiDscpToPgMapping(
             RunAnsibleModuleFail if ptf test fails
         """
         disableTest = request.config.getoption("--disable_test")
+        portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
+        skip_test_on_no_lossless_pg(portSpeedCableLength)
         if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000' or \
                 ('platform_asic' in dutTestParams["basicParams"] and
                  dutTestParams["basicParams"]["platform_asic"] in ["broadcom-dnx", "mellanox"]):
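Reviewer note: `skip_test_on_no_lossless_pg` works because `pytest.skip()` raises, so the single call right after resolving `portSpeedCableLength` aborts the test immediately. A runnable illustration; the test function here is hypothetical, and `"0_0m"` is assumed to be the speed/cable-length key produced by the dummy `pg_lossless_0_0m_profile` path added above:

```python
import pytest

def skip_test_on_no_lossless_pg(portSpeedCableLength):
    # "0_0m" matches the dummy pg_lossless_0_0m_profile name returned by
    # __getBufferProfile when a port has no lossless PG configured.
    if portSpeedCableLength == "0_0m":
        pytest.skip("skip the test due to no lossless buffer PG")

def test_xoff_limit_sketch():
    # Hypothetical caller, mirroring how the real tests invoke the helper.
    portSpeedCableLength = "0_0m"
    skip_test_on_no_lossless_pg(portSpeedCableLength)  # raises pytest's Skipped here
    raise AssertionError("never reached on a 0_0m port")
```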
in ["broadcom-dnx", "mellanox"]): diff --git a/tests/saitests/py3/sai_base_test.py b/tests/saitests/py3/sai_base_test.py index c31cf597446..0280d9ec965 100644 --- a/tests/saitests/py3/sai_base_test.py +++ b/tests/saitests/py3/sai_base_test.py @@ -44,6 +44,7 @@ "5": "scheduler.0", "6": "scheduler.0", "7": ""} +BLOCK_DATA_PLANE_SCHEDULER_NAME = 'scheduler.block_data_plane' class ThriftInterface(BaseTest): @@ -238,16 +239,15 @@ def disable_mellanox_egress_data_plane(self, ptf_port_list): dut_port = self.get_dut_port(ptf_port) dut_port_list.append(dut_port) self.original_dut_port_queue_scheduler_map = self.get_queue_scheduler_name(dut_port_list) - block_data_plane_scheduler_name = 'scheduler.block_data_plane' cmd_set_block_data_plane_scheduler = \ - f'sonic-db-cli CONFIG_DB hset "SCHEDULER|{block_data_plane_scheduler_name}" "type" DWRR "weight" 15 "pir" 1' + f'sonic-db-cli CONFIG_DB hset "SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}" "type" DWRR "weight" 15 "pir" 1' self.exec_cmd_on_dut(self.server, self.test_params['dut_username'], self.test_params['dut_password'], cmd_set_block_data_plane_scheduler) for dut_port in dut_port_list: for q in DATA_PLANE_QUEUE_LIST: cmd_block_q = \ - f" sonic-db-cli CONFIG_DB hset 'QUEUE|{dut_port}|{q}' scheduler {block_data_plane_scheduler_name}" + f" sonic-db-cli CONFIG_DB hset 'QUEUE|{dut_port}|{q}' scheduler {BLOCK_DATA_PLANE_SCHEDULER_NAME}" self.exec_cmd_on_dut( self.server, self.test_params['dut_username'], self.test_params['dut_password'], cmd_block_q) @@ -276,6 +276,20 @@ def enable_mellanox_egress_data_plane(self, ptf_port_list): self.exec_cmd_on_dut( self.server, self.test_params['dut_username'], self.test_params['dut_password'], cmd_recover_q_scheduler_config) + self.remove_block_data_plan_scheduler() + + def remove_block_data_plan_scheduler(self): + get_block_data_plane_scheduler_name = \ + f"sonic-db-cli CONFIG_DB keys 'SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}'" + scheduler_name, _, _ = self.exec_cmd_on_dut(self.server, + self.test_params['dut_username'], + self.test_params['dut_password'], + get_block_data_plane_scheduler_name) + if isinstance(scheduler_name, list) and BLOCK_DATA_PLANE_SCHEDULER_NAME in scheduler_name[0]: + cmd_del_block_data_plane_scheduler = \ + f'sonic-db-cli CONFIG_DB del "SCHEDULER|{BLOCK_DATA_PLANE_SCHEDULER_NAME}"' + self.exec_cmd_on_dut(self.server, self.test_params['dut_username'], self.test_params['dut_password'], + cmd_del_block_data_plane_scheduler) class ThriftInterfaceDataPlane(ThriftInterface): diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 5a48ee24f50..75707b7c1df 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -5193,7 +5193,7 @@ def runTest(self): assert (q_wm_res[queue] <= (margin + 1) * cell_size) elif pkts_num_fill_min: assert (q_wm_res[queue] == 0) - elif 'cisco-8000' in asic_type or "SN5600" in hwsku or "SN5400" in hwsku: + elif 'cisco-8000' in asic_type or "SN56" in hwsku or "SN5400" in hwsku: assert (q_wm_res[queue] <= (margin + 1) * cell_size) else: if platform_asic and platform_asic == "broadcom-dnx":